diff --git a/RELEASE.md b/RELEASE.md index 1ddf3e1ec310a0..2f6274ec0c31ca 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -31,6 +31,7 @@ been added to TF binary distributions (Python wheels). * Replace `DebuggerOptions` of TensorFlow Quantizer, and migrate to `DebuggerConfig` of StableHLO Quantizer. +* Add TensorFlow to StableHLO converter to TensorFlow pip package. ## Keras @@ -87,6 +88,8 @@ * The Python TF Lite Interpreter bindings now have an option `experimental_default_delegate_latest_features` to enable all default delegate features. + * Flatbuffer version update: + * `GetTemporaryPointer()` bug fixed. * `tf.data` * Add `wait` to `tf.data.Dataset.load`. If `True`, for snapshots written diff --git a/ci/official/containers/linux_arm64/build.sh b/ci/official/containers/linux_arm64/build.sh index 5d6a40658bd782..611d5f48ac0084 100755 --- a/ci/official/containers/linux_arm64/build.sh +++ b/ci/official/containers/linux_arm64/build.sh @@ -40,11 +40,15 @@ else fi fi +# TODO(b/341050361): When these steps are verified, remove the GCR image code. +AR_IMAGE_PATH="us-central1-docker.pkg.dev/tensorflow-sigs/tensorflow/build-arm64" + # Build for both JAX and TF usage. We do these in one place because they share # almost all of the same cache layers export DOCKER_BUILDKIT=1 for target in jax tf; do IMAGE="gcr.io/tensorflow-sigs/build-arm64:$target-$TAG" + AR_IMAGE="$AR_IMAGE_PATH:$target-$TAG" docker pull "$IMAGE" || true # Due to some flakiness of resources pulled in the build, allow the docker # command to reattempt build a few times in the case of failure (b/302558736) @@ -55,7 +59,7 @@ for target in jax tf; do --build-arg REQUIREMENTS_FILE=jax.requirements.txt \ --target=$target \ --cache-from "$IMAGE" \ - -t "$IMAGE" . && break + -t "$IMAGE" -t "$AR_IMAGE" . && break done final=$? if [ $final -ne 0 ]; then @@ -66,5 +70,7 @@ for target in jax tf; do if [[ -n "$KOKORO_BUILD_ID" ]]; then gcloud auth configure-docker docker push "$IMAGE" + gcloud auth configure-docker us-central1-docker.pkg.dev + docker push "$AR_IMAGE" fi done diff --git a/ci/official/utilities/setup_docker.sh b/ci/official/utilities/setup_docker.sh index 36afa2545eb244..91618c75f3ba51 100755 --- a/ci/official/utilities/setup_docker.sh +++ b/ci/official/utilities/setup_docker.sh @@ -14,11 +14,12 @@ # limitations under the License. # ============================================================================== if [[ "$TFCI_DOCKER_PULL_ENABLE" == 1 ]]; then - # Simple retry logic for docker-pull errors. Sleeps for 15s if a pull fails. + # Simple retry logic for docker-pull errors. Sleeps if a pull fails. # Pulling an already-pulled container image will finish instantly, so # repeating the command costs nothing.
docker pull "$TFCI_DOCKER_IMAGE" || sleep 15 - docker pull "$TFCI_DOCKER_IMAGE" || sleep 15 + docker pull "$TFCI_DOCKER_IMAGE" || sleep 30 + docker pull "$TFCI_DOCKER_IMAGE" || sleep 60 docker pull "$TFCI_DOCKER_IMAGE" fi diff --git a/requirements_lock_3_10.txt b/requirements_lock_3_10.txt index 05dc3940487eef..f17468ddaafd0a 100644 --- a/requirements_lock_3_10.txt +++ b/requirements_lock_3_10.txt @@ -522,9 +522,9 @@ urllib3==2.2.0 \ --hash=sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20 \ --hash=sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224 # via requests -werkzeug==3.0.1 \ - --hash=sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc \ - --hash=sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10 +werkzeug==3.0.3 \ + --hash=sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18 \ + --hash=sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8 # via tb-nightly wheel==0.41.3 \ --hash=sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942 \ diff --git a/requirements_lock_3_11.txt b/requirements_lock_3_11.txt index 05dc3940487eef..f17468ddaafd0a 100644 --- a/requirements_lock_3_11.txt +++ b/requirements_lock_3_11.txt @@ -522,9 +522,9 @@ urllib3==2.2.0 \ --hash=sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20 \ --hash=sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224 # via requests -werkzeug==3.0.1 \ - --hash=sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc \ - --hash=sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10 +werkzeug==3.0.3 \ + --hash=sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18 \ + --hash=sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8 # via tb-nightly wheel==0.41.3 \ --hash=sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942 \ diff --git a/requirements_lock_3_12.txt b/requirements_lock_3_12.txt index 120ec6ebcd7c72..0d045ea1a0579c 100644 --- a/requirements_lock_3_12.txt +++ b/requirements_lock_3_12.txt @@ -530,9 +530,9 @@ urllib3==2.2.0 \ --hash=sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20 \ --hash=sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224 # via requests -werkzeug==3.0.1 \ - --hash=sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc \ - --hash=sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10 +werkzeug==3.0.3 \ + --hash=sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18 \ + --hash=sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8 # via tb-nightly wheel==0.41.3 \ --hash=sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942 \ diff --git a/requirements_lock_3_9.txt b/requirements_lock_3_9.txt index 36a55514cd788b..48c74173fe553f 100644 --- a/requirements_lock_3_9.txt +++ b/requirements_lock_3_9.txt @@ -526,9 +526,9 @@ urllib3==2.2.0 \ --hash=sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20 \ --hash=sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224 # via requests -werkzeug==3.0.1 \ - --hash=sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc \ - --hash=sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10 +werkzeug==3.0.3 \ + --hash=sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18 \ + 
--hash=sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8 # via tb-nightly wheel==0.41.3 \ --hash=sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942 \ diff --git a/tensorflow/BUILD b/tensorflow/BUILD index 71487e2aec0bee..a4cd4af8975bc2 100644 --- a/tensorflow/BUILD +++ b/tensorflow/BUILD @@ -1382,6 +1382,7 @@ tf_cc_shared_library( "//tensorflow/compiler/mlir/quantization/common/quantization_lib:quantization_config", "//tensorflow/compiler/mlir/lite/sparsity:sparsify_model", "//tensorflow/compiler/mlir/quantization/stablehlo/python:pywrap_quantization_lib_impl", + "//tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python:pywrap_tensorflow_to_stablehlo_lib_impl", "//tensorflow/compiler/mlir/quantization/tensorflow/calibrator:custom_aggregator_op", "//tensorflow/compiler/mlir/quantization/tensorflow/python:quantize_model_cc_impl", "//tensorflow/compiler/mlir/quantization/tensorflow:passes", @@ -1416,6 +1417,7 @@ tf_cc_shared_library( "//tensorflow/core/grappler:grappler_item_builder", "//tensorflow/core/kernels:data_service_ops", "//tensorflow/core/kernels:dataset_ops", + "//tensorflow/core/tpu/kernels:sparse_core_layout", "//tensorflow/core/platform:logging", "//tensorflow/core/platform:path", "//tensorflow/core/platform:stacktrace_handler", diff --git a/tensorflow/c/experimental/ops/gen/common/case_format.cc b/tensorflow/c/experimental/ops/gen/common/case_format.cc index 9b8e955356db07..1e9d123005e8a4 100644 --- a/tensorflow/c/experimental/ops/gen/common/case_format.cc +++ b/tensorflow/c/experimental/ops/gen/common/case_format.cc @@ -14,6 +14,9 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/c/experimental/ops/gen/common/case_format.h" +#include "tensorflow/core/platform/str_util.h" +#include "tensorflow/core/platform/types.h" + namespace tensorflow { namespace generator { diff --git a/tensorflow/c/experimental/ops/gen/common/case_format_test.cc b/tensorflow/c/experimental/ops/gen/common/case_format_test.cc index 37bc5be753fd64..302bcc42453169 100644 --- a/tensorflow/c/experimental/ops/gen/common/case_format_test.cc +++ b/tensorflow/c/experimental/ops/gen/common/case_format_test.cc @@ -15,6 +15,7 @@ limitations under the License. #include "tensorflow/c/experimental/ops/gen/common/case_format.h" #include "tensorflow/core/platform/test.h" +#include "tensorflow/core/platform/types.h" namespace tensorflow { namespace generator { diff --git a/tensorflow/c/experimental/ops/gen/common/controller.cc b/tensorflow/c/experimental/ops/gen/common/controller.cc index a8e02f41011d32..cafb57c0919403 100644 --- a/tensorflow/c/experimental/ops/gen/common/controller.cc +++ b/tensorflow/c/experimental/ops/gen/common/controller.cc @@ -15,11 +15,17 @@ limitations under the License. 
#include "tensorflow/c/experimental/ops/gen/common/controller.h" #include "absl/strings/substitute.h" +#include "tensorflow/c/experimental/ops/gen/common/path_config.h" +#include "tensorflow/c/experimental/ops/gen/common/source_code.h" +#include "tensorflow/c/experimental/ops/gen/model/op_spec.h" +#include "tensorflow/core/framework/api_def.pb.h" #include "tensorflow/core/framework/op.h" -#include "tensorflow/core/lib/io/path.h" -#include "tensorflow/core/lib/strings/str_util.h" +#include "tensorflow/core/framework/op_def.pb.h" +#include "tensorflow/core/framework/op_gen_lib.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/logging.h" +#include "tensorflow/core/platform/path.h" +#include "tsl/platform/status.h" namespace tensorflow { namespace generator { diff --git a/tensorflow/c/experimental/ops/gen/common/path_config.cc b/tensorflow/c/experimental/ops/gen/common/path_config.cc index d9e3881bf15580..b8f84d5f31f4d3 100644 --- a/tensorflow/c/experimental/ops/gen/common/path_config.cc +++ b/tensorflow/c/experimental/ops/gen/common/path_config.cc @@ -16,7 +16,9 @@ limitations under the License. #include +#include "absl/strings/str_join.h" #include "tensorflow/core/lib/strings/str_util.h" +#include "tensorflow/core/platform/types.h" namespace tensorflow { namespace generator { diff --git a/tensorflow/c/experimental/ops/gen/common/source_code.cc b/tensorflow/c/experimental/ops/gen/common/source_code.cc index ea4db53d167109..ea2b66fac7cd27 100644 --- a/tensorflow/c/experimental/ops/gen/common/source_code.cc +++ b/tensorflow/c/experimental/ops/gen/common/source_code.cc @@ -14,9 +14,12 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/c/experimental/ops/gen/common/source_code.h" +#include "absl/strings/ascii.h" #include "absl/strings/match.h" +#include "absl/strings/str_cat.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" +#include "tensorflow/core/platform/stringpiece.h" namespace tensorflow { namespace generator { diff --git a/tensorflow/c/experimental/ops/gen/common/view_util.cc b/tensorflow/c/experimental/ops/gen/common/view_util.cc index a14c7e38b63b46..7c8717067b08fe 100644 --- a/tensorflow/c/experimental/ops/gen/common/view_util.cc +++ b/tensorflow/c/experimental/ops/gen/common/view_util.cc @@ -14,7 +14,9 @@ limitations under the License. 
==============================================================================*/ #include "tensorflow/c/experimental/ops/gen/common/view_util.h" +#include "absl/strings/str_join.h" #include "absl/strings/substitute.h" +#include "tensorflow/core/platform/types.h" namespace tensorflow { namespace generator { diff --git a/tensorflow/c/experimental/stream_executor/stream_executor.cc b/tensorflow/c/experimental/stream_executor/stream_executor.cc index c524dd1d27ac89..65b31f8cfb8f1c 100644 --- a/tensorflow/c/experimental/stream_executor/stream_executor.cc +++ b/tensorflow/c/experimental/stream_executor/stream_executor.cc @@ -407,10 +407,6 @@ class CStreamExecutor : public StreamExecutor { return stream_executor_->host_callback(&device_, stream_handle, &HostCallbackTrampoline, ctx); } - absl::Status AllocateEvent(Event* event) override { - DCHECK(event != nullptr); - return static_cast(event->implementation())->Create(); - } absl::Status DeallocateEvent(Event* event) override { static_cast(event->implementation())->Destroy(); return absl::OkStatus(); @@ -438,14 +434,6 @@ class CStreamExecutor : public StreamExecutor { stream_executor_->get_event_status(&device_, event_handle); return SEEventStatusToEventStatus(event_status); } - bool AllocateStream(Stream* stream) override { - DCHECK(stream != nullptr); - absl::Status status = - static_cast(stream->implementation())->Create(); - // TODO(annarev): update AllocateStream to return status instead - // (similar to AllocateEvent). - return status.ok(); - } void DeallocateStream(Stream* stream) override { static_cast(stream->implementation())->Destroy(); } @@ -559,18 +547,18 @@ class CStreamExecutor : public StreamExecutor { return builder.Build(); } - // Each call creates a new instance of the platform-specific implementation of - // the corresponding interface type. 
- std::unique_ptr CreateEventImplementation() override { - return std::unique_ptr( - new CEvent(&device_, stream_executor_)); + absl::StatusOr> CreateEvent() override { + auto c_event = std::make_unique(&device_, stream_executor_); + TF_RETURN_IF_ERROR(c_event->Create()); + return std::make_unique(this, std::move(c_event)); } + absl::StatusOr> CreateStream( std::optional> priority = std::nullopt) override { - auto stream = std::make_unique( - this, std::make_unique(&device_, stream_executor_)); - TF_RETURN_IF_ERROR(stream->Initialize(priority)); + auto c_stream = std::make_unique(&device_, stream_executor_); + TF_RETURN_IF_ERROR(c_stream->Create()); + auto stream = std::make_unique(this, std::move(c_stream)); return std::move(stream); } diff --git a/tensorflow/c/experimental/stream_executor/stream_executor_test.cc b/tensorflow/c/experimental/stream_executor/stream_executor_test.cc index 56f25a5811293e..680a1d9d1db1f5 100644 --- a/tensorflow/c/experimental/stream_executor/stream_executor_test.cc +++ b/tensorflow/c/experimental/stream_executor/stream_executor_test.cc @@ -342,11 +342,10 @@ TEST_F(StreamExecutorTest, CreateEvent) { StreamExecutor* executor = GetExecutor(0); ASSERT_FALSE(event_created); - Event* event = new Event(executor); - event->Init(); + TF_ASSERT_OK_AND_ASSIGN(auto event, executor->CreateEvent()); ASSERT_TRUE(event_created); ASSERT_FALSE(event_deleted); - delete event; + event.reset(); ASSERT_TRUE(event_deleted); } @@ -365,11 +364,10 @@ TEST_F(StreamExecutorTest, PollForEventStatus) { }; StreamExecutor* executor = GetExecutor(0); - Event event(executor); - event.Init(); - ASSERT_EQ(event.PollForStatus(), Event::Status::kComplete); + TF_ASSERT_OK_AND_ASSIGN(auto event, executor->CreateEvent()); + ASSERT_EQ(event->PollForStatus(), Event::Status::kComplete); event_status = SE_EVENT_ERROR; - ASSERT_EQ(event.PollForStatus(), Event::Status::kError); + ASSERT_EQ(event->PollForStatus(), Event::Status::kError); } TEST_F(StreamExecutorTest, RecordAndWaitForEvent) { @@ -403,14 +401,13 @@ TEST_F(StreamExecutorTest, RecordAndWaitForEvent) { }; StreamExecutor* executor = GetExecutor(0); - Event event(executor); - event.Init(); + TF_ASSERT_OK_AND_ASSIGN(auto event, executor->CreateEvent()); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); ASSERT_FALSE(record_called); - TF_ASSERT_OK(stream->RecordEvent(&event)); + TF_ASSERT_OK(stream->RecordEvent(event.get())); ASSERT_TRUE(record_called); ASSERT_FALSE(wait_called); - TF_ASSERT_OK(stream->WaitFor(&event)); + TF_ASSERT_OK(stream->WaitFor(event.get())); ASSERT_TRUE(wait_called); } diff --git a/tensorflow/compiler/jit/BUILD b/tensorflow/compiler/jit/BUILD index 76f3c147903748..623334534567de 100644 --- a/tensorflow/compiler/jit/BUILD +++ b/tensorflow/compiler/jit/BUILD @@ -199,6 +199,7 @@ cc_library( "//tensorflow/core/tpu:tpu_node_device_util", "//tensorflow/core/tpu:virtual_device", "@com_google_absl//absl/types:optional", + "@local_tsl//tsl/platform:statusor", "@local_xla//xla/stream_executor/tpu:c_api_conversions", "@local_xla//xla/stream_executor/tpu:status_helper", "@local_xla//xla/stream_executor/tpu:tpu_api", @@ -314,6 +315,7 @@ cc_library( "//tensorflow/core/common_runtime:dma_helper", "//tensorflow/core/framework:allocator", "@com_google_absl//absl/synchronization", + "@local_tsl//tsl/platform:statusor", "@local_xla//xla:util", "@local_xla//xla/client:global_data", "@local_xla//xla/client:local_client", @@ -1149,6 +1151,7 @@ cc_library( "@com_google_absl//absl/algorithm:container", 
"@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/numeric:bits", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:span", "@local_xla//xla:status_macros", diff --git a/tensorflow/compiler/jit/device_util.h b/tensorflow/compiler/jit/device_util.h index df3b7d04fbfe7b..ec4d9484ae8854 100644 --- a/tensorflow/compiler/jit/device_util.h +++ b/tensorflow/compiler/jit/device_util.h @@ -20,6 +20,7 @@ limitations under the License. #include #include "absl/container/flat_hash_map.h" +#include "absl/numeric/bits.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" @@ -79,7 +80,7 @@ class DeviceSet { uint64 only_lowest_bit_set = word & -word; // The number of trailing zeros in a non-zero word is the index of the // least significant 1. - int bit_index = ctz_uint64(word); + int bit_index = absl::countr_zero(word); if (!func(DeviceId(word_index * kWordSize + bit_index))) { return; } @@ -89,20 +90,6 @@ class DeviceSet { } private: - static int ctz_uint64(uint64 x) { - DCHECK_NE(x, 0); -#ifdef __GNUC__ - return __builtin_ctzl(x); -#else - int result = 0u; - while ((x & 1u) == 0u) { - x >>= 1; - ++result; - } - return result; -#endif - } - absl::InlinedVector storage_; const int kWordSize = 64; diff --git a/tensorflow/compiler/jit/kernels/BUILD b/tensorflow/compiler/jit/kernels/BUILD index 0ac326c61fb3ec..d173564b7fd10d 100644 --- a/tensorflow/compiler/jit/kernels/BUILD +++ b/tensorflow/compiler/jit/kernels/BUILD @@ -59,8 +59,10 @@ cc_library( "//tensorflow/compiler/jit:xla_compile_util", "//tensorflow/core/platform:refcount", "@com_google_absl//absl/status", + "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/strings", "@local_xla//xla/pjrt:pjrt_client", + "@local_xla//xla/tsl/concurrency:async_value", ], alwayslink = 1, ) diff --git a/tensorflow/compiler/jit/kernels/xla_ops.cc b/tensorflow/compiler/jit/kernels/xla_ops.cc index 9d75388cfbbe80..5a29e8ef36e9b3 100644 --- a/tensorflow/compiler/jit/kernels/xla_ops.cc +++ b/tensorflow/compiler/jit/kernels/xla_ops.cc @@ -24,10 +24,10 @@ limitations under the License. #include #include #include -#include #include "absl/container/flat_hash_map.h" #include "absl/status/status.h" +#include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/types/optional.h" #include "tensorflow/compiler/jit/device_compilation_profiler.h" @@ -52,7 +52,7 @@ limitations under the License. #include "xla/executable_run_options.h" #include "xla/pjrt/pjrt_client.h" #include "xla/service/gpu/gpu_executable_run_options.h" -#include "xla/statusor.h" +#include "xla/tsl/concurrency/async_value_ref.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op_kernel.h" @@ -224,7 +224,7 @@ xla::SendDeviceMemoryFunction GetSendDeviceMemoryFunction( int64_t channel_id, se::Stream* stream, const xla::Shape& shape, const se::DeviceMemoryBase& device_memory_base, const absl::flat_hash_map& frontend_attrs) - -> absl::StatusOr> { + -> absl::StatusOr>> { auto iter = frontend_attrs.find("_xla_host_transfer_rendezvous"); // Generate the Rendezvous key. 
@@ -244,12 +244,10 @@ xla::SendDeviceMemoryFunction GetSendDeviceMemoryFunction( RendezvousInterface::ParsedKey parsed_key; TF_RETURN_IF_ERROR(Rendezvous::ParseKey(rendezvous_key, &parsed_key)); - tsl::AsyncValueRef done_event = - tsl::MakeConstructedAsyncValueRef(stream->parent()); - if (!done_event->Init()) { - return errors::Internal( - "Failed to initialize done event (channel_id=%d)", channel_id); - } + TF_ASSIGN_OR_RETURN(auto event, stream->parent()->CreateEvent()); + tsl::AsyncValueRef> done_event = + tsl::MakeConstructedAsyncValueRef>( + std::move(event)); Rendezvous::Args args; // Rendezvous::Args owns the device context pointer. @@ -273,7 +271,7 @@ xla::RecvDeviceMemoryFunction GetRecvDeviceMemoryFunction( int64_t channel_id, se::Stream* stream, const xla::Shape& shape, se::DeviceMemoryBase* device_memory_base, const absl::flat_hash_map& frontend_attrs) - -> absl::StatusOr> { + -> absl::StatusOr>> { auto iter = frontend_attrs.find("_xla_host_transfer_rendezvous"); // Generate the Rendezvous key. @@ -293,12 +291,10 @@ xla::RecvDeviceMemoryFunction GetRecvDeviceMemoryFunction( RendezvousInterface::ParsedKey parsed_key; TF_RETURN_IF_ERROR(Rendezvous::ParseKey(rendezvous_key, &parsed_key)); - tsl::AsyncValueRef done_event = - tsl::MakeConstructedAsyncValueRef(stream->parent()); - if (!done_event->Init()) { - return errors::Internal( - "Failed to initialize done event (channel_id=%d)", channel_id); - } + TF_ASSIGN_OR_RETURN(auto event, stream->parent()->CreateEvent()); + tsl::AsyncValueRef> done_event = + tsl::MakeConstructedAsyncValueRef>( + std::move(event)); Rendezvous::Args args; // Rendezvous::Args owns the device context pointer. diff --git a/tensorflow/compiler/jit/node_matchers_test.cc b/tensorflow/compiler/jit/node_matchers_test.cc index 8edb3e456c4c00..6f37d5617b6ce6 100644 --- a/tensorflow/compiler/jit/node_matchers_test.cc +++ b/tensorflow/compiler/jit/node_matchers_test.cc @@ -15,6 +15,8 @@ limitations under the License. #include "tensorflow/compiler/jit/node_matchers.h" +#include + #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" @@ -117,12 +119,26 @@ TEST(NodeMatchers, CheckControlDependence) { EXPECT_THAT(placeholder_d.node(), NodeWith(Name("placeholder_d"), CtrlDeps())); - EXPECT_EQ( - Explain(placeholder_c.node(), NodeWith(CtrlDeps())), - "ctrl_deps, which has 2 elements, does not match expected: is empty"); - EXPECT_EQ(Explain(placeholder_d.node(), NodeWith(CtrlDeps(NodeWith()))), - "ctrl_deps does not match expected: has 1 element and that element " - "is any node"); + // TODO(griffithjames): Exactly match these explanations. + // + // When the OSS build has been updated to include the new error messages, the + // Explain() expectations can be exact strings again. 
+ { + const std::string explanation = + Explain(placeholder_c.node(), NodeWith(CtrlDeps())); + EXPECT_NE(explanation.find("ctrl_deps, which has 2 elements"), + std::string::npos); + EXPECT_NE(explanation.find("does not match expected: is empty"), + std::string::npos); + } + { + const std::string explanation = + Explain(placeholder_d.node(), NodeWith(CtrlDeps(NodeWith()))); + EXPECT_NE(explanation.find("ctrl_deps"), std::string::npos); + EXPECT_NE(explanation.find("does not match expected: has 1 element and " + "that element is any node"), + std::string::npos); + } } TEST(NodeMatchers, ConstValue) { diff --git a/tensorflow/compiler/jit/xla_device.cc b/tensorflow/compiler/jit/xla_device.cc index b5b0c16422ccab..471f54571d2b53 100644 --- a/tensorflow/compiler/jit/xla_device.cc +++ b/tensorflow/compiler/jit/xla_device.cc @@ -52,7 +52,6 @@ limitations under the License. #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/stream_executor_no_cuda.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" diff --git a/tensorflow/compiler/jit/xla_device_context.cc b/tensorflow/compiler/jit/xla_device_context.cc index 821d294af90f66..faf3b65d407a7e 100644 --- a/tensorflow/compiler/jit/xla_device_context.cc +++ b/tensorflow/compiler/jit/xla_device_context.cc @@ -30,6 +30,7 @@ limitations under the License. #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/framework/tensor_reference.h" +#include "tsl/platform/statusor.h" namespace tensorflow { @@ -171,8 +172,8 @@ void XlaDeviceContext::CopyCPUTensorToDevice(const Tensor* cpu_tensor, host_to_device_stream_.get(), literal, xla_tensor->shaped_buffer())); if (UseMultipleStreams()) { - auto event = std::make_shared(stream_->parent()); - TF_RET_CHECK(event->Init()) << "Event failed to initialize!"; + TF_ASSIGN_OR_RETURN(std::shared_ptr event, + stream_->parent()->CreateEvent()); TF_RETURN_IF_ERROR(host_to_device_stream_->RecordEvent(event.get())); xla_tensor->ResetDefinitionEvent(std::move(event), host_to_device_stream_.get()); diff --git a/tensorflow/compiler/jit/xla_host_recv_device_context.cc b/tensorflow/compiler/jit/xla_host_recv_device_context.cc index 54f22fe59fa0bf..ae3c149d5d1387 100644 --- a/tensorflow/compiler/jit/xla_host_recv_device_context.cc +++ b/tensorflow/compiler/jit/xla_host_recv_device_context.cc @@ -38,7 +38,7 @@ void XlaHostRecvDeviceContext::CopyDeviceTensorToCPU( done(status); return; } - status = stream_->RecordEvent(&done_event_.get()); + status = stream_->RecordEvent(done_event_.get().get()); if (!status.ok()) { done(status); return; diff --git a/tensorflow/compiler/jit/xla_host_recv_device_context.h b/tensorflow/compiler/jit/xla_host_recv_device_context.h index 8938fd9c9e0c17..028fd4efd68091 100644 --- a/tensorflow/compiler/jit/xla_host_recv_device_context.h +++ b/tensorflow/compiler/jit/xla_host_recv_device_context.h @@ -36,8 +36,8 @@ namespace tensorflow { // Tensor device_tensor(device_allocator, DT_FLOAT, TensorShape({2, 2})); // se::DeviceMemoryBase gpu_dst{device_tensor.data(), 4 * sizeof(float)}; // xla::Shape shape(xla::F32, {2, 2}, {}, {}) -// tsl::AsyncValueRef done_event = -// tsl::MakeConstructedAsyncValueRef(stream.parent()); +// tsl::AsyncValueRef> done_event = +// tsl::MakeConstructedAsyncValueRef>(stream.parent()); // 
done_event->Init(); // Tensor dest_cpu_tensor; // @@ -48,10 +48,10 @@ namespace tensorflow { class XlaHostRecvDeviceContext : public DeviceContext { public: - XlaHostRecvDeviceContext(se::Stream* stream, - const se::DeviceMemoryBase& device_memory_base, - const xla::Shape& shape, - tsl::AsyncValueRef& done_event) + XlaHostRecvDeviceContext( + se::Stream* stream, const se::DeviceMemoryBase& device_memory_base, + const xla::Shape& shape, + tsl::AsyncValueRef>& done_event) : stream_(stream), device_memory_base_(device_memory_base), shape_(shape), @@ -82,7 +82,7 @@ class XlaHostRecvDeviceContext : public DeviceContext { // not an issue here since only DeviceMemoryBase methods/members are used. const se::DeviceMemoryBase device_memory_base_; const xla::Shape shape_; - tsl::AsyncValueRef done_event_; + tsl::AsyncValueRef> done_event_; XlaHostRecvDeviceContext(const XlaHostRecvDeviceContext&) = delete; void operator=(const XlaHostRecvDeviceContext&) = delete; diff --git a/tensorflow/compiler/jit/xla_host_send_device_context.cc b/tensorflow/compiler/jit/xla_host_send_device_context.cc index 5d106c8dc3e073..3d1a9a9f5228c6 100644 --- a/tensorflow/compiler/jit/xla_host_send_device_context.cc +++ b/tensorflow/compiler/jit/xla_host_send_device_context.cc @@ -28,7 +28,7 @@ void XlaHostSendDeviceContext::CopyCPUTensorToDevice( done(status); return; } - status = stream_->RecordEvent(&done_event_.get()); + status = stream_->RecordEvent(done_event_.get().get()); if (!status.ok()) { done(status); return; diff --git a/tensorflow/compiler/jit/xla_host_send_device_context.h b/tensorflow/compiler/jit/xla_host_send_device_context.h index d7a254770c969e..f4e4e9a2535341 100644 --- a/tensorflow/compiler/jit/xla_host_send_device_context.h +++ b/tensorflow/compiler/jit/xla_host_send_device_context.h @@ -37,8 +37,8 @@ namespace tensorflow { // Tensor device_tensor(device_allocator, DT_FLOAT, TensorShape({2, 2})); // se::DeviceMemoryBase gpu_dst{device_tensor.data(), 4 * sizeof(float)}; // xla::Shape shape(xla::F32, {2, 2}, {}, {}) -// tsl::AsyncValueRef done_event = -// tsl::MakeConstructedAsyncValueRef(stream.parent()); +// tsl::AsyncValueRef> done_event = +// tsl::MakeConstructedAsyncValueRef>(stream.parent()); // done_event->Init(); // // XlaHostSendDeviceContext device_context(&stream, &gpu_dst, @@ -48,10 +48,10 @@ namespace tensorflow { class XlaHostSendDeviceContext : public DeviceContext { public: - XlaHostSendDeviceContext(se::Stream* stream, - se::DeviceMemoryBase* device_memory_base, - const xla::Shape& shape, - tsl::AsyncValueRef& done_event) + XlaHostSendDeviceContext( + se::Stream* stream, se::DeviceMemoryBase* device_memory_base, + const xla::Shape& shape, + tsl::AsyncValueRef>& done_event) : stream_(stream), device_memory_base_(device_memory_base), shape_(shape), @@ -79,7 +79,7 @@ class XlaHostSendDeviceContext : public DeviceContext { se::Stream* stream_; // Not owned. se::DeviceMemoryBase* device_memory_base_; // Not owned. 
const xla::Shape shape_; - tsl::AsyncValueRef done_event_; + tsl::AsyncValueRef> done_event_; XlaHostSendDeviceContext(const XlaHostSendDeviceContext&) = delete; void operator=(const XlaHostSendDeviceContext&) = delete; diff --git a/tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc b/tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc index 16f42d1dbe1a0d..62da04c3e7510f 100644 --- a/tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc +++ b/tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc @@ -79,9 +79,10 @@ TEST_F(XlaHostSendRecvDeviceContextTest, CopyDeviceTensorToCPU) { stream->Memcpy(&gpu_dst, origin_cpu_tensor.data(), gpu_dst.size())); TF_ASSERT_OK(stream->BlockHostUntilDone()); - tsl::AsyncValueRef done_event = - tsl::MakeConstructedAsyncValueRef(executor); - done_event->Init(); + TF_ASSERT_OK_AND_ASSIGN(auto se_event, executor->CreateEvent()); + tsl::AsyncValueRef> done_event = + tsl::MakeConstructedAsyncValueRef>( + std::move(se_event)); XlaHostRecvDeviceContext* device_context = new XlaHostRecvDeviceContext(stream.get(), gpu_dst, shape, done_event); TF_ASSERT_OK(device_context->CopyDeviceTensorToCPUSync( @@ -108,9 +109,10 @@ TEST_F(XlaHostSendRecvDeviceContextTest, CopyCPUTensorToDevice) { xla::Shape shape; TF_ASSERT_OK(TensorShapeToXLAShape(DT_FLOAT, TensorShape({2, 2}), &shape)); - tsl::AsyncValueRef done_event = - tsl::MakeConstructedAsyncValueRef(executor); - done_event->Init(); + TF_ASSERT_OK_AND_ASSIGN(auto se_event, executor->CreateEvent()); + tsl::AsyncValueRef> done_event = + tsl::MakeConstructedAsyncValueRef>( + std::move(se_event)); XlaHostSendDeviceContext* device_context = new XlaHostSendDeviceContext(stream.get(), &gpu_dst, shape, done_event); TF_ASSERT_OK(device_context->CopyCPUTensorToDeviceSync( @@ -141,17 +143,19 @@ TEST_F(XlaHostSendRecvDeviceContextTest, RoundTrip) { xla::Shape shape; TF_ASSERT_OK(TensorShapeToXLAShape(DT_FLOAT, TensorShape({2, 2}), &shape)); - tsl::AsyncValueRef send_done_event = - tsl::MakeConstructedAsyncValueRef(executor); - send_done_event->Init(); + TF_ASSERT_OK_AND_ASSIGN(auto se_event, executor->CreateEvent()); + tsl::AsyncValueRef> send_done_event = + tsl::MakeConstructedAsyncValueRef>( + std::move(se_event)); XlaHostSendDeviceContext* send_device_context = new XlaHostSendDeviceContext( stream.get(), &gpu_dst, shape, send_done_event); TF_ASSERT_OK(send_device_context->CopyCPUTensorToDeviceSync( &origin_cpu_tensor, device_.get(), &device_tensor)); - tsl::AsyncValueRef recv_done_event = - tsl::MakeConstructedAsyncValueRef(executor); - recv_done_event->Init(); + TF_ASSERT_OK_AND_ASSIGN(auto recv_se_event, executor->CreateEvent()); + tsl::AsyncValueRef> recv_done_event = + tsl::MakeConstructedAsyncValueRef>( + std::move(recv_se_event)); XlaHostRecvDeviceContext* recv_device_context = new XlaHostRecvDeviceContext( stream.get(), gpu_dst, shape, recv_done_event); TF_ASSERT_OK(recv_device_context->CopyDeviceTensorToCPUSync( diff --git a/tensorflow/compiler/jit/xla_launch_util.cc b/tensorflow/compiler/jit/xla_launch_util.cc index fcfad5c0a7eb67..cfeaa937024b32 100644 --- a/tensorflow/compiler/jit/xla_launch_util.cc +++ b/tensorflow/compiler/jit/xla_launch_util.cc @@ -390,10 +390,7 @@ Status XlaComputationLaunchContext::PopulateOutputs( std::shared_ptr definition_event; if (use_multiple_streams_ && stream) { - definition_event = std::make_shared(stream->parent()); - if (!definition_event->Init()) { - return errors::Internal("Failed to initialize tensor definition event."); - } + 
TF_ASSIGN_OR_RETURN(definition_event, stream->parent()->CreateEvent()); TF_RETURN_IF_ERROR(stream->RecordEvent(definition_event.get())); } diff --git a/tensorflow/compiler/jit/xla_tpu_device.cc b/tensorflow/compiler/jit/xla_tpu_device.cc index dfedd586df69aa..403e6b17e6fc00 100644 --- a/tensorflow/compiler/jit/xla_tpu_device.cc +++ b/tensorflow/compiler/jit/xla_tpu_device.cc @@ -43,6 +43,7 @@ limitations under the License. #include "tensorflow/core/tpu/tpu_defs.h" #include "tensorflow/core/tpu/tpu_node_device_util.h" #include "tensorflow/core/tpu/virtual_device.h" +#include "tsl/platform/statusor.h" namespace tensorflow { namespace { @@ -271,9 +272,8 @@ void TpuDeviceToDeviceCopy(DeviceContext* src_dev_context, dst_xla_context->host_to_device_stream())); } - auto definition_event = - std::make_shared(dst_xla_context->stream()->parent()); - TF_RET_CHECK(definition_event->Init()) << "Event failed to initialize!"; + TF_ASSIGN_OR_RETURN(std::shared_ptr definition_event, + dst_xla_context->stream()->parent()->CreateEvent()); TF_RETURN_IF_ERROR( dst_device_to_device_stream->RecordEvent(definition_event.get())); xla_output->ResetDefinitionEvent(std::move(definition_event), diff --git a/tensorflow/compiler/mlir/BUILD b/tensorflow/compiler/mlir/BUILD index 810e4277e8c58f..46d5e7e9fb9005 100644 --- a/tensorflow/compiler/mlir/BUILD +++ b/tensorflow/compiler/mlir/BUILD @@ -226,7 +226,6 @@ tf_cc_binary( "//tensorflow/compiler/mlir/tensorflow:translate_cl_options", "//tensorflow/compiler/mlir/tensorflow:translate_lib", "//tensorflow/compiler/mlir/tensorflow:translate_registration", - "//tensorflow/compiler/mlir/tensorflow:translate_tf_dialect_op", "//tensorflow/core:lib", "//tensorflow/core:tensorflow", "@com_google_absl//absl/strings", diff --git a/tensorflow/compiler/mlir/init_mlir.cc b/tensorflow/compiler/mlir/init_mlir.cc index 938cd52359b9d6..ce7cefabcdcf73 100644 --- a/tensorflow/compiler/mlir/init_mlir.cc +++ b/tensorflow/compiler/mlir/init_mlir.cc @@ -15,6 +15,7 @@ limitations under the License. 
#include "tensorflow/compiler/mlir/init_mlir.h" +#include "llvm/ADT/StringRef.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/PrettyStackTrace.h" #include "tensorflow/core/platform/init_main.h" diff --git a/tensorflow/compiler/mlir/lite/BUILD b/tensorflow/compiler/mlir/lite/BUILD index 72970c2d07d4b5..7e49b1d028ce69 100644 --- a/tensorflow/compiler/mlir/lite/BUILD +++ b/tensorflow/compiler/mlir/lite/BUILD @@ -1026,6 +1026,7 @@ cc_library( ":converter_inc", ":tensorflow_lite", "//tensorflow/compiler/mlir/lite/schema:schema_fbs", + "//tensorflow/compiler/mlir/lite/schema:schema_fbs_with_mutable", "//tensorflow/compiler/mlir/tensorflow:dynamic_shape_utils", "//tensorflow/compiler/mlir/tensorflow:tensorflow_types", "//tensorflow/core/platform:errors", @@ -1033,7 +1034,6 @@ cc_library( "//tensorflow/core/platform:statusor", "//tensorflow/lite/core/c:private_common", "//tensorflow/lite/kernels/internal:kernel_utils", - "//tensorflow/lite/schema:schema_fbs_with_mutable", "//tensorflow/lite/schema:schema_utils", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/strings", @@ -1055,8 +1055,8 @@ tf_native_cc_binary( name = "flatbuffer_to_string", srcs = ["flatbuffer_to_string.cc"], deps = [ + "//tensorflow/compiler/mlir/lite/schema:schema_fbs_with_reflection", "//tensorflow/lite/core:model_builder", - "//tensorflow/lite/schema:schema_fbs_with_reflection", "@flatbuffers", ], ) @@ -1070,21 +1070,6 @@ tf_native_cc_binary( ], ) -cc_library( - name = "emit_error_reporter", - srcs = [ - "emit_error_reporter.cc", - ], - hdrs = [ - "emit_error_reporter.h", - ], - deps = [ - "//tensorflow/lite/core/api", - "@llvm-project//mlir:FuncDialect", - "@llvm-project//mlir:IR", - ], -) - cc_library( name = "flatbuffer_export", srcs = [ @@ -1103,6 +1088,7 @@ cc_library( "//tensorflow/compiler/mlir:op_or_arg_name_mapper", "//tensorflow/compiler/mlir/lite/metrics:error_collector_inst", "//tensorflow/compiler/mlir/lite/quantization/ir:QuantOps", + "//tensorflow/compiler/mlir/lite/schema:schema_fbs_with_mutable", "//tensorflow/compiler/mlir/tensorflow", "//tensorflow/compiler/mlir/tensorflow:convert_tensor", "//tensorflow/compiler/mlir/tensorflow:dynamic_shape_utils", @@ -1120,7 +1106,6 @@ cc_library( "//tensorflow/lite/experimental/remat:metadata_util", "//tensorflow/lite/python/metrics:converter_error_data_proto_cc", "//tensorflow/lite/schema:schema_conversion_utils", - "//tensorflow/lite/schema:schema_fbs_with_mutable", "//tensorflow/lite/toco:toco_flags_proto_cc", "//tensorflow/lite/tools/versioning", "//tensorflow/lite/tools/versioning:gpu_compatibility", @@ -1163,6 +1148,7 @@ cc_library( ":size_utils", ":tensorflow_lite", "//tensorflow/compiler/mlir/lite/quantization/ir:QuantOps", + "//tensorflow/compiler/mlir/lite/schema:schema_fbs_with_mutable", "//tensorflow/compiler/mlir/lite/stablehlo:legalize_stablehlo_composite_to_tfl_custom", "//tensorflow/compiler/mlir/lite/stablehlo:legalize_stablehlo_to_vhlo_pass", "//tensorflow/compiler/mlir/quantization/common/quantization_lib", @@ -1177,7 +1163,6 @@ cc_library( "//tensorflow/core/platform:status", "//tensorflow/lite:framework", "//tensorflow/lite/experimental/remat:metadata_util", - "//tensorflow/lite/schema:schema_fbs_with_mutable", "//tensorflow/lite/schema:schema_utils", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", diff --git a/tensorflow/compiler/mlir/lite/emit_error_reporter.h b/tensorflow/compiler/mlir/lite/emit_error_reporter.h deleted file mode 100644 index 
9e9a5925600fc2..00000000000000 --- a/tensorflow/compiler/mlir/lite/emit_error_reporter.h +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_COMPILER_MLIR_LITE_EMIT_ERROR_REPORTER_H_ -#define TENSORFLOW_COMPILER_MLIR_LITE_EMIT_ERROR_REPORTER_H_ - -#include - -#include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project -#include "mlir/IR/BuiltinOps.h" // from @llvm-project -#include "tensorflow/lite/core/api/error_reporter.h" - -namespace tflite { - -// Error reporter that reports errors via the module's emitError. -class EmitErrorReporter : public ErrorReporter { - public: - explicit EmitErrorReporter(mlir::ModuleOp module) : module_(module) {} - int Report(const char* format, va_list args) override; - - private: - mlir::ModuleOp module_; -}; - -} // namespace tflite - -#endif // TENSORFLOW_COMPILER_MLIR_LITE_EMIT_ERROR_REPORTER_H_ diff --git a/tensorflow/compiler/mlir/lite/flatbuffer_export.cc b/tensorflow/compiler/mlir/lite/flatbuffer_export.cc index 84bc7ab2daae19..0b7bd8cc7177e6 100644 --- a/tensorflow/compiler/mlir/lite/flatbuffer_export.cc +++ b/tensorflow/compiler/mlir/lite/flatbuffer_export.cc @@ -85,6 +85,7 @@ limitations under the License. #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h" #include "tensorflow/compiler/mlir/lite/metrics/error_collector_inst.h" #include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h" +#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h" #include "tensorflow/compiler/mlir/lite/utils/convert_type.h" #include "tensorflow/compiler/mlir/lite/utils/low_bit_utils.h" #include "tensorflow/compiler/mlir/lite/utils/stateful_ops_utils.h" @@ -110,7 +111,6 @@ limitations under the License. #include "tensorflow/lite/experimental/remat/metadata_util.h" #include "tensorflow/lite/graph_info.h" #include "tensorflow/lite/python/metrics/converter_error_data.pb.h" -#include "tensorflow/lite/schema/mutable/schema_generated.h" #include "tensorflow/lite/schema/schema_conversion_utils.h" #include "tensorflow/lite/string_util.h" #include "tensorflow/lite/toco/toco_flags.pb.h" diff --git a/tensorflow/compiler/mlir/lite/flatbuffer_import.cc b/tensorflow/compiler/mlir/lite/flatbuffer_import.cc index 7858c42d0673d3..bcc0244194dccd 100644 --- a/tensorflow/compiler/mlir/lite/flatbuffer_import.cc +++ b/tensorflow/compiler/mlir/lite/flatbuffer_import.cc @@ -77,6 +77,7 @@ limitations under the License. 
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h" #include "tensorflow/compiler/mlir/lite/offset_buffer.h" #include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h" +#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h" #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h" #include "tensorflow/compiler/mlir/lite/utils/const_tensor_utils.h" #include "tensorflow/compiler/mlir/lite/utils/convert_type.h" @@ -96,7 +97,6 @@ limitations under the License. #include "tensorflow/lite/experimental/remat/metadata_util.h" #include "tensorflow/lite/graph_info.h" #include "tensorflow/lite/model_builder.h" -#include "tensorflow/lite/schema/mutable/schema_generated.h" #include "tensorflow/lite/schema/schema_utils.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" diff --git a/tensorflow/compiler/mlir/lite/flatbuffer_operator.cc b/tensorflow/compiler/mlir/lite/flatbuffer_operator.cc index 0734ebca106228..3dfc21f5c2a07c 100644 --- a/tensorflow/compiler/mlir/lite/flatbuffer_operator.cc +++ b/tensorflow/compiler/mlir/lite/flatbuffer_operator.cc @@ -46,6 +46,7 @@ limitations under the License. #include "stablehlo/dialect/StablehloOps.h" // from @stablehlo #include "stablehlo/dialect/VhloOps.h" // from @stablehlo #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h" +#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h" #include "tensorflow/compiler/mlir/lite/utils/convert_type.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h" #include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h" @@ -54,7 +55,6 @@ limitations under the License. #include "tensorflow/core/platform/status.h" #include "tensorflow/lite/core/c/builtin_op_data.h" #include "tensorflow/lite/kernels/internal/kernel_utils.h" -#include "tensorflow/lite/schema/mutable/schema_generated.h" #include "tensorflow/lite/schema/schema_utils.h" #include "tsl/platform/status.h" diff --git a/tensorflow/compiler/mlir/lite/flatbuffer_operator.h b/tensorflow/compiler/mlir/lite/flatbuffer_operator.h index 4a54264b6244f2..64865eb77b5c43 100644 --- a/tensorflow/compiler/mlir/lite/flatbuffer_operator.h +++ b/tensorflow/compiler/mlir/lite/flatbuffer_operator.h @@ -34,9 +34,9 @@ limitations under the License. #include "stablehlo/dialect/StablehloOps.h" // from @stablehlo #include "stablehlo/dialect/VhloOps.h" // from @stablehlo #include "stablehlo/dialect/VhloTypes.h" // from @stablehlo +#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" -#include "tensorflow/lite/schema/mutable/schema_generated.h" namespace mlir { diff --git a/tensorflow/compiler/mlir/lite/flatbuffer_to_string.cc b/tensorflow/compiler/mlir/lite/flatbuffer_to_string.cc index b3e7e8e633e0da..df28f501ef7656 100644 --- a/tensorflow/compiler/mlir/lite/flatbuffer_to_string.cc +++ b/tensorflow/compiler/mlir/lite/flatbuffer_to_string.cc @@ -25,7 +25,7 @@ limitations under the License. 
#include "flatbuffers/flatbuffers.h" // from @flatbuffers #include "flatbuffers/minireflect.h" // from @flatbuffers -#include "tensorflow/lite/schema/reflection/schema_generated.h" +#include "tensorflow/compiler/mlir/lite/schema/reflection/schema_generated.h" #if FLATBUFFERS_LITTLEENDIAN == 0 #include "tensorflow/lite/core/model_builder.h" #endif diff --git a/tensorflow/compiler/mlir/lite/schema/BUILD b/tensorflow/compiler/mlir/lite/schema/BUILD index d9aed023196ad9..34b799a9738741 100644 --- a/tensorflow/compiler/mlir/lite/schema/BUILD +++ b/tensorflow/compiler/mlir/lite/schema/BUILD @@ -18,3 +18,29 @@ flatbuffer_cc_library( srcs = ["schema.fbs"], compatible_with = get_compatible_with_portable(), ) + +# Generic schema for flatbuffer converter (but with mutable makes bigger). +flatbuffer_cc_library( + name = "schema_fbs_with_mutable", + srcs = ["schema.fbs"], + compatible_with = get_compatible_with_portable(), + flatc_args = [ + "--gen-mutable", + "--gen-object-api", + ], + out_prefix = "mutable/", +) + +# Generic schema for inference on device (but with reflections makes bigger). +flatbuffer_cc_library( + name = "schema_fbs_with_reflection", + srcs = ["schema.fbs"], + compatible_with = get_compatible_with_portable(), + flatc_args = [ + "--reflect-types", + "--reflect-names", + "--no-union-value-namespacing", + "--gen-object-api", + ], + out_prefix = "reflection/", +) diff --git a/tensorflow/compiler/mlir/lite/stablehlo/odml_converter/BUILD b/tensorflow/compiler/mlir/lite/stablehlo/odml_converter/BUILD index eb0d86946e56eb..5add6c730cac5e 100644 --- a/tensorflow/compiler/mlir/lite/stablehlo/odml_converter/BUILD +++ b/tensorflow/compiler/mlir/lite/stablehlo/odml_converter/BUILD @@ -18,7 +18,6 @@ package_group( tf_cc_binary( name = "odml-converter", - testonly = True, srcs = ["odml_converter_main.cc"], compatible_with = get_compatible_with_portable(), visibility = [ diff --git a/tensorflow/compiler/mlir/lite/stablehlo/odml_converter/transforms/outline_composites.cc b/tensorflow/compiler/mlir/lite/stablehlo/odml_converter/transforms/outline_composites.cc index b66b8f7e51ff60..821ba4fa7e4d2f 100644 --- a/tensorflow/compiler/mlir/lite/stablehlo/odml_converter/transforms/outline_composites.cc +++ b/tensorflow/compiler/mlir/lite/stablehlo/odml_converter/transforms/outline_composites.cc @@ -94,7 +94,7 @@ bool HasSplatArg(Operation* op, float val, int opr_num) { // Determines if the given op is semantically that of the gauss error function. bool MatchERF(Operation* op) { if (auto custom_call = llvm::dyn_cast_or_null(op)) { - return custom_call.getCallTargetName().equals("mhlo.erf"); + return custom_call.getCallTargetName() == "mhlo.erf"; } return llvm::isa(op); } diff --git a/tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir b/tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir index 20d56eea578d79..7e60dc85a487a6 100644 --- a/tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir +++ b/tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir @@ -431,8 +431,8 @@ module { %2 = stablehlo.constant dense<-128> : tensor<1x1x1xi8> // Input 1 zero point (z1). %3 = stablehlo.constant dense<-128> : tensor<1x1x1xi32> // Input 1 zero point (z1) (upcast & folded into i32). %4 = stablehlo.constant dense<4.000000e-01> : tensor<1x1x1xf32> // Input 2 inverse scale (1 / s2). - %5 = stablehlo.constant dense<-3> : tensor<1x1x1xi8> // Input 2 zero point (z2). 
- %6 = stablehlo.constant dense<-3> : tensor<1x1x1xi32> // Input 2 zero point (z2) (upcast & folded into i32). + %5 = stablehlo.constant dense<0> : tensor<1x1x1xi8> // Input 2 zero point (z2). + %6 = stablehlo.constant dense<0> : tensor<1x1x1xi32> // Input 2 zero point (z2) (upcast & folded into i32). %7 = stablehlo.constant dense<5.000000e-01> : tensor<1x1x1xf32> // Output inverse scale (1 / s3). %8 = stablehlo.constant dense<-5> : tensor<1x1x1xi8> // Output zero point (z3). %9 = stablehlo.constant dense<1.250000e+01> : tensor<1x1x1xf32> // Merged scale (s1 * s2). @@ -454,8 +454,8 @@ module { return %23 : tensor<8x16x4xf32> } // CHECK: %[[UQ_0:.*]] = stablehlo.uniform_quantize %[[ARG_0]] : (tensor<8x16x16xf32>) -> tensor<8x16x16x!quant.uniform> -// CHECK: %[[UQ_1:.*]] = stablehlo.uniform_quantize %[[ARG_1]] : (tensor<8x16x4xf32>) -> tensor<8x16x4x!quant.uniform> -// CHECK: %[[DOT_GENERAL:.*]] = stablehlo.dot_general %[[UQ_0]], %[[UQ_1]], batching_dims = [0] x [0], contracting_dims = [2] x [1] : (tensor<8x16x16x!quant.uniform>, tensor<8x16x4x!quant.uniform>) -> tensor<8x16x4x!quant.uniform> +// CHECK: %[[UQ_1:.*]] = stablehlo.uniform_quantize %[[ARG_1]] : (tensor<8x16x4xf32>) -> tensor<8x16x4x!quant.uniform> +// CHECK: %[[DOT_GENERAL:.*]] = stablehlo.dot_general %[[UQ_0]], %[[UQ_1]], batching_dims = [0] x [0], contracting_dims = [2] x [1] : (tensor<8x16x16x!quant.uniform>, tensor<8x16x4x!quant.uniform>) -> tensor<8x16x4x!quant.uniform> // CHECK: %[[DQ_0:.*]] = stablehlo.uniform_dequantize %[[DOT_GENERAL]] : (tensor<8x16x4x!quant.uniform>) -> tensor<8x16x4xf32> // CHECK: return %[[DQ_0]] @@ -492,7 +492,7 @@ module { %1 = stablehlo.constant dense<2.000000e-01> : tensor<1x1x1xf32> // Input 1 inverse scale (1 / s1). %2 = stablehlo.constant dense<-128> : tensor<1x1x1xi8> // Input 1 zero point (z1). %3 = stablehlo.constant dense<4.000000e-01> : tensor<1x1x1xf32> // Input 2 inverse scale (1 / s2). - %4 = stablehlo.constant dense<-3> : tensor<1x1x1xi8> // Input 2 zero point (z2). + %4 = stablehlo.constant dense<0> : tensor<1x1x1xi8> // Input 2 zero point (z2). %5 = stablehlo.constant dense<5.000000e-01> : tensor<1x1x1xf32> // Output inverse scale (1 / s3). %6 = stablehlo.constant dense<-5> : tensor<1x1x1xi8> // Output zero point (z3). %7 = stablehlo.constant dense<1.250000e+01> : tensor<1x1x1xf32> // Merged scale (s1 * s2). 
@@ -516,8 +516,8 @@ module { return %23 : tensor<8x16x4xf32> } // CHECK: %[[UQ_0:.*]] = stablehlo.uniform_quantize %[[ARG_0]] : (tensor<8x16x16xf32>) -> tensor<8x16x16x!quant.uniform> -// CHECK: %[[UQ_1:.*]] = stablehlo.uniform_quantize %[[ARG_1]] : (tensor<8x16x4xf32>) -> tensor<8x16x4x!quant.uniform> -// CHECK: %[[DOT_GENERAL:.*]] = stablehlo.dot_general %[[UQ_0]], %[[UQ_1]], batching_dims = [0] x [0], contracting_dims = [2] x [1] : (tensor<8x16x16x!quant.uniform>, tensor<8x16x4x!quant.uniform>) -> tensor<8x16x4x!quant.uniform> +// CHECK: %[[UQ_1:.*]] = stablehlo.uniform_quantize %[[ARG_1]] : (tensor<8x16x4xf32>) -> tensor<8x16x4x!quant.uniform> +// CHECK: %[[DOT_GENERAL:.*]] = stablehlo.dot_general %[[UQ_0]], %[[UQ_1]], batching_dims = [0] x [0], contracting_dims = [2] x [1] : (tensor<8x16x16x!quant.uniform>, tensor<8x16x4x!quant.uniform>) -> tensor<8x16x4x!quant.uniform> // CHECK: %[[DQ_0:.*]] = stablehlo.uniform_dequantize %[[DOT_GENERAL]] : (tensor<8x16x4x!quant.uniform>) -> tensor<8x16x4xf32> // CHECK: return %[[DQ_0]] diff --git a/tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir b/tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir index 292e6f5f8ee612..4121caa60a8e0d 100644 --- a/tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir +++ b/tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir @@ -187,3 +187,56 @@ func.func @gelu(%arg0: tensor<2xf32>) -> tensor<2xf32> { // CHECK-LABEL: gelu // CHECK: %0 = "tfl.gelu"(%arg0) <{approximate = false}> : (tensor<2xf32>) -> tensor<2xf32> +// CHECK-LABEL func.func @jax_image_resize_nearest +func.func @jax_image_resize_nearest(%arg0: tensor<1x2x2x10xf32>) -> (tensor<1x4x4x10xf32>) { + %1 = mhlo.composite "odml.jax_resize_nearest_neighbor2d" %arg0 {composite_attributes = {output_size = dense<4> : tensor<2xi64>}, decomposition = @XlaCallModule_odml.jax_resize_nearest_neighbor2d.impl_0} : (tensor<1x2x2x10xf32>) -> tensor<1x4x4x10xf32> + return %1 : tensor<1x4x4x10xf32> +} +func.func private @XlaCallModule_odml.jax_resize_nearest_neighbor2d.impl_0(%arg0: tensor<1x2x2x10xf32>) -> tensor<1x4x4x10xf32> { + %0 = call @XlaCallModule__resize_0(%arg0) : (tensor<1x2x2x10xf32>) -> tensor<1x4x4x10xf32> + return %0 : tensor<1x4x4x10xf32> +} +func.func private @XlaCallModule__resize_0(%arg0: tensor<1x2x2x10xf32>) -> (tensor<1x4x4x10xf32>) { + %0 = mhlo.constant dense<2> : tensor + %1 = mhlo.constant dense<0> : tensor + %2 = mhlo.constant dense<4.000000e+00> : tensor + %3 = mhlo.constant dense<2.000000e+00> : tensor + %4 = mhlo.constant dense<5.000000e-01> : tensor + %5 = "mhlo.iota"() <{iota_dimension = 0 : i64}> : () -> tensor<4xf32> + %6 = "mhlo.broadcast_in_dim"(%4) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor) -> tensor<4xf32> + %7 = mhlo.add %5, %6 : tensor<4xf32> + %8 = "mhlo.broadcast_in_dim"(%3) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor) -> tensor<4xf32> + %9 = mhlo.multiply %7, %8 : tensor<4xf32> + %10 = "mhlo.broadcast_in_dim"(%2) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor) -> tensor<4xf32> + %11 = mhlo.divide %9, %10 : tensor<4xf32> + %12 = mhlo.floor %11 : tensor<4xf32> + %13 = mhlo.convert %12 : (tensor<4xf32>) -> tensor<4xi32> + %14 = "mhlo.broadcast_in_dim"(%1) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor) -> tensor<4xi32> + %15 = mhlo.compare LT, %13, %14, SIGNED : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> + %16 = "mhlo.broadcast_in_dim"(%0) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : 
(tensor) -> tensor<4xi32> + %17 = mhlo.add %13, %16 : tensor<4xi32> + %18 = mhlo.select %15, %17, %13 : tensor<4xi1>, tensor<4xi32> + %19 = "mhlo.broadcast_in_dim"(%18) <{broadcast_dimensions = dense<0> : tensor<1xi64>}> : (tensor<4xi32>) -> tensor<4x1xi32> + %20 = "mhlo.gather"(%arg0, %19) <{dimension_numbers = #mhlo.gather, slice_sizes = dense<[1, 1, 2, 10]> : tensor<4xi64>}> : (tensor<1x2x2x10xf32>, tensor<4x1xi32>) -> tensor<1x4x2x10xf32> + %21 = "mhlo.iota"() <{iota_dimension = 0 : i64}> : () -> tensor<4xf32> + %22 = "mhlo.broadcast_in_dim"(%4) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor) -> tensor<4xf32> + %23 = mhlo.add %21, %22 : tensor<4xf32> + %24 = "mhlo.broadcast_in_dim"(%3) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor) -> tensor<4xf32> + %25 = mhlo.multiply %23, %24 : tensor<4xf32> + %26 = "mhlo.broadcast_in_dim"(%2) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor) -> tensor<4xf32> + %27 = mhlo.divide %25, %26 : tensor<4xf32> + %28 = mhlo.floor %27 : tensor<4xf32> + %29 = mhlo.convert %28 : (tensor<4xf32>) -> tensor<4xi32> + %30 = "mhlo.broadcast_in_dim"(%1) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor) -> tensor<4xi32> + %31 = mhlo.compare LT, %29, %30, SIGNED : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1> + %32 = "mhlo.broadcast_in_dim"(%0) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor) -> tensor<4xi32> + %33 = mhlo.add %29, %32 : tensor<4xi32> + %34 = mhlo.select %31, %33, %29 : tensor<4xi1>, tensor<4xi32> + %35 = "mhlo.broadcast_in_dim"(%34) <{broadcast_dimensions = dense<0> : tensor<1xi64>}> : (tensor<4xi32>) -> tensor<4x1xi32> + %36 = "mhlo.gather"(%20, %35) <{dimension_numbers = #mhlo.gather, slice_sizes = dense<[1, 4, 1, 10]> : tensor<4xi64>}> : (tensor<1x4x2x10xf32>, tensor<4x1xi32>) -> tensor<1x4x4x10xf32> + return %36 : tensor<1x4x4x10xf32> +} + +// CHECK: %cst = arith.constant dense<4> : tensor<2xi32> +// CHECK: %0 = "tfl.resize_nearest_neighbor"(%arg0, %cst) <{align_corners = false, half_pixel_centers = true}> : (tensor<1x2x2x10xf32>, tensor<2xi32>) -> tensor<1x4x4x10xf32> +// CHECK: return %0 : tensor<1x4x4x10xf32> diff --git a/tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir b/tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir index dde72965ae65fc..64b14b85fc7c71 100644 --- a/tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir +++ b/tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir @@ -393,13 +393,13 @@ func.func @dot_general_upstream_srq_float_operands(%arg0: tensor<1x2x3x4xf32>, % // CHECK-LABEL: dot_general_upstream_srq_asym_weight func.func @dot_general_upstream_srq_asym_weight(%arg0: tensor<1x2x3x4x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> { - %0 = stablehlo.constant() {value = dense<1> : tensor<1x2x4x5xi8>} : () -> tensor<1x2x4x5x!quant.uniform> - %1 = "stablehlo.dot_general"(%arg0, %0) {dot_dimension_numbers = #stablehlo.dot, precision_config = [#stablehlo, #stablehlo]} : (tensor<1x2x3x4x!quant.uniform>, tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> + %0 = stablehlo.constant() {value = dense<1> : tensor<1x2x4x5xi8>} : () -> tensor<1x2x4x5x!quant.uniform> + %1 = "stablehlo.dot_general"(%arg0, %0) {dot_dimension_numbers = #stablehlo.dot, precision_config = [#stablehlo, #stablehlo]} : (tensor<1x2x3x4x!quant.uniform>, tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> return %1 : 
tensor<1x2x3x5x!quant.uniform> } // CHECK-SAME: %[[ARG:.+]]: tensor<1x2x3x4x!quant.uniform> -// CHECK: %[[QCONST_0:.+]] = "tfl.pseudo_qconst"() <{qtype = tensor<1x2x4x5x!quant.uniform>, value = dense<1> : tensor<1x2x4x5xi8>}> : () -> tensor<1x2x4x5x!quant.uniform> -// CHECK: %[[BMM:.+]] = "tfl.batch_matmul"(%[[ARG]], %[[QCONST_0]]) <{adj_x = false, adj_y = false}> : (tensor<1x2x3x4x!quant.uniform>, tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> +// CHECK: %[[QCONST_0:.+]] = "tfl.pseudo_qconst"() <{qtype = tensor<1x2x4x5x!quant.uniform>, value = dense<1> : tensor<1x2x4x5xi8>}> : () -> tensor<1x2x4x5x!quant.uniform> +// CHECK: %[[BMM:.+]] = "tfl.batch_matmul"(%[[ARG]], %[[QCONST_0]]) <{adj_x = false, adj_y = false}> : (tensor<1x2x3x4x!quant.uniform>, tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> // ----- @@ -427,8 +427,8 @@ func.func @dot_general_upstream_srq_per_axis_quantized_filter(%arg0: tensor<1x3x // CHECK-LABEL: dot_general_upstream_srq_per_axis_quantized_filter_with_batch_dim func.func @dot_general_upstream_srq_per_axis_quantized_filter_with_batch_dim(%arg0: tensor<1x1x3x!quant.uniform>) -> tensor<1x1x2x!quant.uniform> { - %0 = stablehlo.constant() {value = dense<1> : tensor<1x3x2xi8>} : () -> tensor<1x3x2x!quant.uniform> - %1 = stablehlo.dot_general %arg0, %0, batching_dims = [0] x [0], contracting_dims = [2] x [1] : (tensor<1x1x3x!quant.uniform>, tensor<1x3x2x!quant.uniform>) -> tensor<1x1x2x!quant.uniform> + %0 = stablehlo.constant() {value = dense<1> : tensor<1x3x2xi8>} : () -> tensor<1x3x2x!quant.uniform> + %1 = stablehlo.dot_general %arg0, %0, batching_dims = [0] x [0], contracting_dims = [2] x [1] : (tensor<1x1x3x!quant.uniform>, tensor<1x3x2x!quant.uniform>) -> tensor<1x1x2x!quant.uniform> return %1 : tensor<1x1x2x!quant.uniform> } // Nothing changes. @@ -459,8 +459,8 @@ func.func @dot_general_upstream_srq_per_axis_quantized_filter_multibatch(%arg0: // CHECK-LABEL: dot_general_upstream_srq_per_axis_quantized_filter_with_multiple_contracting_dims func.func @dot_general_upstream_srq_per_axis_quantized_filter_with_multiple_contracting_dims(%arg0: tensor<1x2x3x!quant.uniform>) -> tensor<1x1x!quant.uniform> { - %0 = stablehlo.constant() {value = dense<1> : tensor<1x3x2xi8>} : () -> tensor<1x3x2x!quant.uniform> - %1 = stablehlo.dot_general %arg0, %0, contracting_dims = [1, 2] x [2, 1] : (tensor<1x2x3x!quant.uniform>, tensor<1x3x2x!quant.uniform>) -> tensor<1x1x!quant.uniform> + %0 = stablehlo.constant() {value = dense<1> : tensor<1x3x2xi8>} : () -> tensor<1x3x2x!quant.uniform> + %1 = stablehlo.dot_general %arg0, %0, contracting_dims = [1, 2] x [2, 1] : (tensor<1x2x3x!quant.uniform>, tensor<1x3x2x!quant.uniform>) -> tensor<1x1x!quant.uniform> return %1 : tensor<1x1x!quant.uniform> } // Nothing changes. @@ -557,9 +557,9 @@ func.func @dot_general_srq_constant_transpose_rhs(%arg0: tensor<1x3x!quant.unifo // (e.g. argument), the conversion to `tfl.fully_connected` doesn't happen. 
// CHECK-LABEL: dot_general_srq_arg_transpose_rhs -func.func @dot_general_srq_arg_transpose_rhs(%arg0: tensor<1x3x!quant.uniform>, %arg1: tensor<2x3x!quant.uniform>) -> tensor<1x2x!quant.uniform> { - %1 = stablehlo.transpose %arg1, dims = [1, 0] : (tensor<2x3x!quant.uniform>) -> tensor<3x2x!quant.uniform> - %2 = stablehlo.dot_general %arg0, %1, contracting_dims = [1] x [0] : (tensor<1x3x!quant.uniform>, tensor<3x2x!quant.uniform>) -> tensor<1x2x!quant.uniform> +func.func @dot_general_srq_arg_transpose_rhs(%arg0: tensor<1x3x!quant.uniform>, %arg1: tensor<2x3x!quant.uniform>) -> tensor<1x2x!quant.uniform> { + %1 = stablehlo.transpose %arg1, dims = [1, 0] : (tensor<2x3x!quant.uniform>) -> tensor<3x2x!quant.uniform> + %2 = stablehlo.dot_general %arg0, %1, contracting_dims = [1] x [0] : (tensor<1x3x!quant.uniform>, tensor<3x2x!quant.uniform>) -> tensor<1x2x!quant.uniform> %3 = stablehlo.uniform_quantize %2 : (tensor<1x2x!quant.uniform>) -> tensor<1x2x!quant.uniform> return %3 : tensor<1x2x!quant.uniform> } @@ -577,7 +577,7 @@ func.func @dot_general_srq_arg_transpose_rhs(%arg0: tensor<1x3x!quant.uniform qi8 requantization is // properly lowered to `tfl.batch_matmul`. -func.func @dot_general_srq_to_batch_matmul(%arg0: tensor<1x2x3x4x!quant.uniform>, %arg1: tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> { +func.func @dot_general_srq_to_batch_matmul(%arg0: tensor<1x2x3x4x!quant.uniform>, %arg1: tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> { %0 = "stablehlo.dot_general"(%arg0, %arg1) { dot_dimension_numbers = #stablehlo.dot< lhs_batching_dimensions = [0, 1], @@ -586,14 +586,14 @@ func.func @dot_general_srq_to_batch_matmul(%arg0: tensor<1x2x3x4x!quant.uniform< rhs_contracting_dimensions = [2] >, precision_config = [#stablehlo, #stablehlo] - } : (tensor<1x2x3x4x!quant.uniform>, tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> + } : (tensor<1x2x3x4x!quant.uniform>, tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> %1 = stablehlo.uniform_quantize %0 : (tensor<1x2x3x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> return %1 : tensor<1x2x3x5x!quant.uniform> } // CHECK-LABEL: dot_general_srq_to_batch_matmul -// CHECK-SAME: (%[[ARG_0:.+]]: tensor<1x2x3x4x!quant.uniform>, %[[ARG_1:.+]]: tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> -// CHECK: %[[BMM:.+]] = "tfl.batch_matmul"(%[[ARG_0]], %[[ARG_1]]) <{adj_x = false, adj_y = false}> : (tensor<1x2x3x4x!quant.uniform>, tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> +// CHECK-SAME: (%[[ARG_0:.+]]: tensor<1x2x3x4x!quant.uniform>, %[[ARG_1:.+]]: tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> +// CHECK: %[[BMM:.+]] = "tfl.batch_matmul"(%[[ARG_0]], %[[ARG_1]]) <{adj_x = false, adj_y = false}> : (tensor<1x2x3x4x!quant.uniform>, tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> // CHECK-NOT: stablehlo.dot_general // CHECK-NOT: stablehlo.uniform_quantize // CHECK-NOT: tfl.fully_connected @@ -606,7 +606,7 @@ func.func @dot_general_srq_to_batch_matmul(%arg0: tensor<1x2x3x4x!quant.uniform< // not converted to `tfl.batch_matmul` when there are multiple use of the // intermediate result. 
-func.func @dot_general_srq_multiple_use_of_intermediate_result(%arg0: tensor<1x2x3x4x!quant.uniform>, %arg1: tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> { +func.func @dot_general_srq_multiple_use_of_intermediate_result(%arg0: tensor<1x2x3x4x!quant.uniform>, %arg1: tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> { %0 = "stablehlo.dot_general"(%arg0, %arg1) { dot_dimension_numbers = #stablehlo.dot< lhs_batching_dimensions = [0, 1], @@ -615,7 +615,7 @@ func.func @dot_general_srq_multiple_use_of_intermediate_result(%arg0: tensor<1x2 rhs_contracting_dimensions = [2] >, precision_config = [#stablehlo, #stablehlo] - } : (tensor<1x2x3x4x!quant.uniform>, tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> + } : (tensor<1x2x3x4x!quant.uniform>, tensor<1x2x4x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> %1 = stablehlo.uniform_quantize %0 : (tensor<1x2x3x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> %2 = stablehlo.uniform_quantize %0 : (tensor<1x2x3x5x!quant.uniform>) -> tensor<1x2x3x5x!quant.uniform> %3 = stablehlo.add %1, %2 : tensor<1x2x3x5x!quant.uniform> diff --git a/tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_lowering_patterns.td b/tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_lowering_patterns.td index 89327e492d72aa..5b9324c2a1782b 100644 --- a/tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_lowering_patterns.td +++ b/tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_lowering_patterns.td @@ -50,6 +50,21 @@ def LegalizeTorchUpsampleBlinear2dComposite: Pat< ConstantAttr,"{0, 3, 1, 2}">)), [(IsSupportedNchwUpsampleBlinear $input, $old_val, $attrs)]>; +// TODO(b/333961789): Add support for NCHW layout for PyTorch resize, plus jax +// supports NCHW inputs as well, so we need to add reliable way of checking the +// layout. +// pattern to lower a stablehlo.composite with `jax.image.resize` in `nearest` +// mode to a tflite.resize_nearest_neighbor op. +def LegalizeJaxResizeNearestNeighbor2dComposite: Pat< + (MHLO_CompositeOp:$old_val + (variadic $input), + ConstantStrAttr, $attrs, $_, $_), + (TFL_ResizeNearestNeighborOp + $input, + (Arith_ConstantOp:$output_size (GetI32DenseAttr (GetAsVectorAttr<"output_size"> $attrs))), + ConstBoolAttrFalse, + ConstBoolAttrTrue)>; + def LegalizeCompositeGELU : Pat<(MHLO_CompositeOp:$composite (variadic $inputs), diff --git a/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_stablehlo_to_vhlo.cc b/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_stablehlo_to_vhlo.cc index a03d1a1d958f65..b3a85259cc482a 100644 --- a/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_stablehlo_to_vhlo.cc +++ b/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_stablehlo_to_vhlo.cc @@ -280,7 +280,7 @@ struct LegalizeStablehloToVhloPass VhloToStablehloTypeConverter to_builtin_converter; // StableHLO --> VHLO (allow funcs) - // VHLO -> Downgrade to 0.19.0 / tflite_supported_stablehlo_version + // VHLO -> Downgrade to tflite_supported_stablehlo_version // VHLO Tensor --> Builtin Tensor // Remove cast(tensor->vhlo) -> cast(vhlo->tensor) pattern if (failed(ApplyStablehloToVhloPatterns(module, diff --git a/tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc b/tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc index b18227bdddc1ee..2011b6d33ccd45 100644 --- a/tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc +++ b/tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc @@ -58,7 +58,6 @@ limitations under the License. 
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.h" #include "tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.h" #include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h" -#include "xla/status.h" #include "xla/statusor.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.pb.h" diff --git a/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc b/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc index 98cc1048bced90..f6595331c02415 100644 --- a/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc +++ b/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc @@ -140,8 +140,7 @@ LogicalResult ConvertMaxUnpoolingFunc::VerifySignature() { return func_.emitWarning() << "'padding' attribute for " << kMaxUnpooling << " is not set or not a string"; } - if (!padding.getValue().equals("VALID") && - !padding.getValue().equals("SAME")) { + if (padding.getValue() != "VALID" && padding.getValue() != "SAME") { return func_.emitWarning() << "Padding for " << kMaxUnpooling << " must be 'SAME' or 'VALID'"; } @@ -174,9 +173,9 @@ LogicalResult ConvertMaxUnpoolingFunc::CreateCustomOptions( return func_.emitError() << "'padding' attribute for " << kMaxUnpooling << " is not set or not a string"; } - if (padding.getValue().equals("VALID")) { + if (padding.getValue() == "VALID") { pool_params.padding = kTfLitePaddingValid; - } else if (padding.getValue().equals("SAME")) { + } else if (padding.getValue() == "SAME") { pool_params.padding = kTfLitePaddingSame; } else { return func_.emitError() diff --git a/tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc b/tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc index ad0fc81157401f..720616309afe38 100644 --- a/tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc +++ b/tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc @@ -143,8 +143,8 @@ constexpr absl::string_view kModulePartitionedCall = R"mlir( constexpr absl::string_view kModuleHybridQuantized = R"mlir( module { - func.func @main(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3x!quant.uniform> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<1x3xf32>) { - %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3x!quant.uniform>) -> tensor<1x3xf32> + func.func @main(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3x!quant.uniform> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<1x3xf32>) { + %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3x!quant.uniform>) -> tensor<1x3xf32> return %0 : tensor<1x3xf32> } } diff --git a/tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc b/tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc index a507b816426e8b..bf894948d4cec8 100644 --- a/tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc +++ b/tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc @@ -270,7 +270,7 @@ LogicalResult SetAttributeMap(MLIRContext& context, if (const auto string_attr = mlir::dyn_cast_or_null(attribute.getValue()); string_attr != nullptr && - string_attr.getValue().equals(kNullAttributeValue)) { + string_attr.getValue() == kNullAttributeValue) { continue; } diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/BUILD b/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/BUILD index 7b7653c9db12c8..9926546f8c47a8 
100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/BUILD +++ b/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/BUILD @@ -61,9 +61,11 @@ cc_library( "//tensorflow/compiler/mlir/quantization/stablehlo/cc:types", "//tensorflow/compiler/mlir/quantization/tensorflow:exported_model_proto_cc", "//tensorflow/compiler/mlir/quantization/tensorflow:quantization_options_proto_cc", + "//tensorflow/compiler/mlir/quantization/tensorflow/calibrator:calibration_statistics_proto_cc", "//tensorflow/compiler/mlir/quantization/tensorflow/cc:run_passes", "//tensorflow/compiler/mlir/quantization/tensorflow/python:py_function_lib", "//tensorflow/core/protobuf:for_core_protos_cc", + "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/base:nullability", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/log", diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.cc b/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.cc index 2ec9b38865edfb..52db906e512391 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.cc +++ b/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.cc @@ -20,6 +20,7 @@ limitations under the License. #include #include +#include "absl/algorithm/container.h" #include "absl/base/nullability.h" #include "absl/container/flat_hash_map.h" #include "absl/log/die_if_null.h" @@ -42,6 +43,7 @@ limitations under the License. #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h" +#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics.pb.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/run_passes.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/exported_model.pb.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h" @@ -51,32 +53,52 @@ limitations under the License. #include "tsl/platform/statusor.h" namespace mlir::quant::stablehlo { +namespace { using ::stablehlo::quantization::AddCalibrationStatistics; using ::stablehlo::quantization::CreateRepresentativeDatasetFileMap; using ::stablehlo::quantization::DisableDebugging; +using ::stablehlo::quantization::IsCalibrationRequired; using ::stablehlo::quantization::QuantizationConfig; +using ::stablehlo::quantization::ReadStatistics; using ::stablehlo::quantization::RepresentativeDatasetConfig; using ::stablehlo::quantization::io::CreateTmpDir; using ::stablehlo::quantization::io::GetLocalTmpFileName; +using ::stablehlo::quantization::io::ListDirectory; using ::tensorflow::AssetFileDef; using ::tensorflow::SignatureDef; +using ::tensorflow::calibrator::CalibrationStatistics; using ::tensorflow::quantization::ExportedModel; using ::tensorflow::quantization::PyFunctionLibrary; using ::tensorflow::quantization::RunPasses; +using CalibrationStatisticsFlatMap = + absl::flat_hash_map; -absl::Status RunCalibrationPasses(mlir::ModuleOp module_op, MLIRContext& ctx, - absl::string_view calibration_data_dir) { +} // namespace + +absl::Status RunCalibrationPasses( + mlir::ModuleOp module_op, MLIRContext& ctx, + absl::string_view calibration_data_dir, + const bool force_regenerate_calibration_data) { // Disable DumpTensor ops when running calibration. 
DisableDebugging(module_op); + std::vector skipping_aggregator_ops; + if (!force_regenerate_calibration_data) { + TF_ASSIGN_OR_RETURN(const CalibrationStatisticsFlatMap statistics_map, + ReadStatistics(calibration_data_dir)); + absl::c_for_each(statistics_map, [&](const auto& iter) { + return skipping_aggregator_ops.push_back(iter.first); + }); + } + return RunPasses( /*name=*/ CalibrationComponent::kName, /*add_passes_func=*/ - [calibration_data_dir](PassManager& pm) { - pm.addPass( - CreateInsertCalibrationStatisticsSaverPass(calibration_data_dir)); + [calibration_data_dir, &skipping_aggregator_ops](PassManager& pm) { + pm.addPass(CreateInsertCalibrationStatisticsSaverPass( + calibration_data_dir, skipping_aggregator_ops)); }, ctx, module_op); } @@ -97,8 +119,9 @@ CalibrationComponent::CalibrationComponent( signature_def_map_(std::move(signature_def_map)), signature_keys_(std::move(signature_keys)) {} -absl::StatusOr CalibrationComponent::ExportToSavedModel( +absl::Status CalibrationComponent::ExportToSavedModel( ModuleOp module_op, absl::string_view calibration_data_dir, + const bool force_regenerate_calibration_data, const absl::string_view dst_saved_model_path) { TF_ASSIGN_OR_RETURN(const std::string checkpoint_dir, GetLocalTmpFileName()); @@ -106,8 +129,13 @@ absl::StatusOr CalibrationComponent::ExportToSavedModel( // be reflected in the original values. mlir::OwningOpRef cloned_module_ref(module_op.clone()); - TF_RETURN_IF_ERROR( - RunCalibrationPasses(*cloned_module_ref, *ctx_, calibration_data_dir)); + TF_RETURN_IF_ERROR(RunCalibrationPasses(*cloned_module_ref, *ctx_, + calibration_data_dir, + force_regenerate_calibration_data)); + + const bool is_calibration_required = + IsCalibrationRequired(*cloned_module_ref); + if (!is_calibration_required) return absl::OkStatus(); // `duplicate_shape_determining_constants = false` because the // resulting graph of this step is not expected to be loaded on TPU. @@ -128,13 +156,13 @@ absl::StatusOr CalibrationComponent::ExportToSavedModel( src_saved_model_path_, tags_, signature_def_map_); - return exported_model; + return absl::OkStatus(); } absl::StatusOr CalibrationComponent::Run( ModuleOp module_op, const QuantizationConfig& config) { - // Exports the pre-calibrated model to SavedModel. - TF_ASSIGN_OR_RETURN(const std::string precalibrated_saved_model_dir, + // Export the calibration model to SavedModel. + TF_ASSIGN_OR_RETURN(const std::string calibration_saved_model_dir, CreateTmpDir()); std::string calibration_data_dir = @@ -143,29 +171,32 @@ absl::StatusOr CalibrationComponent::Run( TF_ASSIGN_OR_RETURN(calibration_data_dir, CreateTmpDir()); } - TF_ASSIGN_OR_RETURN(ExportedModel exported_model, - ExportToSavedModel(module_op, calibration_data_dir, - precalibrated_saved_model_dir)); - - // Translates `RepresentativeDatasetConfig`s to signature key -> - // `RepresentativeDatasetFile` mapping. - const auto dataset_configs = - config.calibration_options().representative_datasets(); - const std::vector dataset_config_vector( - dataset_configs.begin(), dataset_configs.end()); - TF_ASSIGN_OR_RETURN( - const auto representative_dataset_file_map, - CreateRepresentativeDatasetFileMap(dataset_config_vector)); - - // Runs calibration on the exported model. The statistics will be stored in a - // separate singleton object `CalibratorSingleton` and are directly added to - // `exported_model` without re-importing it. 
- if (py_function_lib_->RunCalibration( - precalibrated_saved_model_dir, signature_keys_, tags_, - /*force_graph_mode_calibration=*/true, - representative_dataset_file_map) == std::nullopt) { - return absl::InternalError( - "CalibrationComponent error: Failed to run calibration."); + TF_RETURN_IF_ERROR(ExportToSavedModel( + module_op, calibration_data_dir, + config.calibration_options().force_regenerate_calibration_data(), + calibration_saved_model_dir)); + + TF_ASSIGN_OR_RETURN(std::vector calibration_saved_model_files, + ListDirectory(calibration_saved_model_dir)); + if (!calibration_saved_model_files.empty()) { + // Translate `RepresentativeDatasetConfig`s to signature key -> + // `RepresentativeDatasetFile` mapping. + const auto dataset_configs = + config.calibration_options().representative_datasets(); + const std::vector dataset_config_vector( + dataset_configs.begin(), dataset_configs.end()); + TF_ASSIGN_OR_RETURN( + const auto representative_dataset_file_map, + CreateRepresentativeDatasetFileMap(dataset_config_vector)); + + // Run calibration on the exported model. + if (py_function_lib_->RunCalibration( + calibration_saved_model_dir, signature_keys_, tags_, + /*force_graph_mode_calibration=*/true, + representative_dataset_file_map) == std::nullopt) { + return absl::InternalError( + "CalibrationComponent error: Failed to run calibration."); + } } if (absl::Status status = AddCalibrationStatistics( diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.h b/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.h index a7f94e9f0a37bf..03d2dd933732d4 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.h +++ b/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.h @@ -77,10 +77,11 @@ class CalibrationComponent : public Component { // Exports `module_op` to SavedModel at `dst_saved_model_path`. This is used // to export the pre-calibrated `module_op` to SavedModel so that the // calibration process can use it to load and run the graph with the - // representative dataset. - absl::StatusOr ExportToSavedModel( - ModuleOp module_op, absl::string_view calibration_data_dir, - absl::string_view dst_saved_model_path); + // representative dataset. Returns a failure status if the export fails. + absl::Status ExportToSavedModel(ModuleOp module_op, + absl::string_view calibration_data_dir, + bool force_regenerate_calibration_data, + absl::string_view dst_saved_model_path); // Imports the SavedModel at `calibrated_saved_model_path` to `ModuleOp` after // running calibration. @@ -113,7 +114,8 @@ class CalibrationComponent : public Component { // Runs passes to prepare the calibration model. 
absl::Status RunCalibrationPasses(mlir::ModuleOp module_op, MLIRContext& ctx, - absl::string_view calibration_data_dir); + absl::string_view calibration_data_dir, + bool force_regenerate_calibration_data); } // namespace mlir::quant::stablehlo diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.cc b/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.cc index 141af8e06fc6db..ea96bd029b079e 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.cc +++ b/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.cc @@ -45,6 +45,8 @@ using ::tensorflow::quantization::PyFunctionLibrary; using CalibrationStatisticsFlatMap = absl::flat_hash_map; +} // namespace + // Reads the calibration statistics from the given directory. absl::StatusOr ReadStatistics( absl::string_view calibration_data_dir) { @@ -63,8 +65,6 @@ absl::StatusOr ReadStatistics( return statistics_map; } -} // namespace - absl::Status AddCalibrationStatistics( mlir::ModuleOp module_op, absl::string_view calibration_data_dir, const CalibrationOptions& calibration_options, @@ -102,4 +102,14 @@ absl::Status AddCalibrationStatistics( return status; } +bool IsCalibrationRequired(mlir::ModuleOp module_op) { + bool calibration_required = false; + module_op.walk( + [&calibration_required]( + mlir::TF::CalibrationStatisticsSaverOp statistics_saver_op) { + calibration_required = true; + }); + return calibration_required; +} + } // namespace stablehlo::quantization diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.h b/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.h index 48112b1eafc816..41f78be3578bca 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.h +++ b/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.h @@ -15,14 +15,24 @@ limitations under the License. #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_CALIBRATION_STATISTICS_H_ #define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_CALIBRATION_STATISTICS_H_ +#include + +#include "absl/container/flat_hash_map.h" #include "absl/status/status.h" +#include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h" +#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics.pb.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h" namespace stablehlo::quantization { +// Reads the calibration statistics from the given directory. +absl::StatusOr> +ReadStatistics(absl::string_view calibration_data_dir); + // Adds calibrated min / max values to CustomAggregator nodes in `graph_def`. // The min and max values will be added to the "min" and "max" attributes, // respectively. `calibration_options` provides the strategy to retrieve min and @@ -32,6 +42,9 @@ absl::Status AddCalibrationStatistics( const stablehlo::quantization::CalibrationOptions& calibration_options, const tensorflow::quantization::PyFunctionLibrary& py_function_library); +// Checks if the model requires calibration.
+bool IsCalibrationRequired(mlir::ModuleOp module_op); + } // namespace stablehlo::quantization #endif // TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_CALIBRATION_STATISTICS_H_ diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc b/tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc index 145533ddb07cc9..1522c68f300cba 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc +++ b/tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc @@ -176,7 +176,6 @@ void ExpandWeightOnlyPtqPreset(QuantizationConfig& config) { // user-provided explicit `QuantizationSpec`s will be appended. QuantizationSpecs new_specs{}; *new_specs.add_specs() = GetDefaultWeightOnlyPtqSpec(); - // TODO: b/307625297 - Add per-channel weight only support. // Append user-provided specs to override existing specs. const QuantizationSpecs& previous_specs = config.specs(); diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc b/tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc index 965da4ff998635..5575a7516fccc9 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc +++ b/tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc @@ -192,12 +192,12 @@ FailureOr ConvertPaddingAttr( const int64_t padding_nums_size = 2 * (rhs_shape.getRank() - 2); padding_nums.reserve(padding_nums_size); - if (conv_padding.strref().equals("EXPLICIT")) { + if (conv_padding.strref() == "EXPLICIT") { for (auto padding_elem : op.getExplicitPaddingAttr().template getAsRange()) { padding_nums.push_back(padding_elem.getInt()); } - } else if (conv_padding.strref().equals("VALID")) { + } else if (conv_padding.strref() == "VALID") { padding_nums.resize(padding_nums_size, 0); } else { padding_nums.resize(padding_nums_size); diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_calibration_statistics_saver.cc b/tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_calibration_statistics_saver.cc index 9e6bf25fb44811..8cb0b645c312cf 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_calibration_statistics_saver.cc +++ b/tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_calibration_statistics_saver.cc @@ -15,6 +15,8 @@ limitations under the License. #include #include #include +#include +#include #include "absl/strings/string_view.h" #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project @@ -48,11 +50,14 @@ std::string GetOutputFilePath(absl::string_view calibration_data_dir, } // Finds `CustomAggregator` ops and collects their outputs and attributes. -void FindCustomAggregatorOps(Region& region, - SmallVector& statistics_outputs, - SmallVector& ids, - SmallVector& calibration_methods) { +void FindCustomAggregatorOps( + Region& region, + const std::unordered_set& aggregator_ops_to_ignore, + SmallVector& statistics_outputs, SmallVector& ids, + SmallVector& calibration_methods) { for (auto op : region.getOps()) { + if (aggregator_ops_to_ignore.count(op.getId().str())) continue; + ids.push_back(op.getId()); calibration_methods.push_back(op.getCalibrationMethod()); statistics_outputs.push_back(op.getMin()); @@ -63,11 +68,13 @@ void FindCustomAggregatorOps(Region& region, // Inserts a `CalibrationStatisticsSaverOp` to the end of the region. 
LogicalResult InsertCalibrationStatisticsSaverOp( - Region& region, MLIRContext& ctx, absl::string_view output_file_path) { + Region& region, MLIRContext& ctx, absl::string_view output_file_path, + const std::unordered_set& aggregator_ops_to_ignore) { SmallVector statistics_outputs; SmallVector ids; SmallVector calibration_methods; - FindCustomAggregatorOps(region, statistics_outputs, ids, calibration_methods); + FindCustomAggregatorOps(region, aggregator_ops_to_ignore, statistics_outputs, + ids, calibration_methods); if (statistics_outputs.empty()) return failure(); OpBuilder builder(&ctx); @@ -115,6 +122,7 @@ bool ContainCalibrationStatisticsSaverOp(Operation* op) { } // namespace +#define GEN_PASS_DECL_INSERTCALIBRATIONSTATISTICSSAVERPASS #define GEN_PASS_DEF_INSERTCALIBRATIONSTATISTICSSAVERPASS #include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h.inc" @@ -126,11 +134,7 @@ class InsertCalibrationStatisticsSaverPass InsertCalibrationStatisticsSaverPass>:: InsertCalibrationStatisticsSaverPassBase; - explicit InsertCalibrationStatisticsSaverPass(StringRef calibration_data_dir) - : calibration_data_dir_(calibration_data_dir) {} - private: - std::string calibration_data_dir_; void runOnOperation() override; }; @@ -138,17 +142,22 @@ void InsertCalibrationStatisticsSaverPass::runOnOperation() { ModuleOp module_op = getOperation(); MLIRContext& ctx = getContext(); + std::unordered_set aggregator_ops_to_ignore( + aggregator_ops_to_ignore_.begin(), aggregator_ops_to_ignore_.end()); + // Insert CalibrationStatisticsSaverOp to the end of each region. for (auto func_op : module_op.getOps()) { int32_t output_file_idx = 0; StringRef func_name = func_op.getSymName(); - func_op.walk([&output_file_idx, &ctx, &func_name, this](Operation* op) { + func_op.walk([&output_file_idx, &ctx, &func_name, &aggregator_ops_to_ignore, + this](Operation* op) { for (Region& region : op->getRegions()) { if (succeeded(InsertCalibrationStatisticsSaverOp( region, ctx, GetOutputFilePath(calibration_data_dir_, func_name, - output_file_idx)))) { + output_file_idx), + aggregator_ops_to_ignore))) { ++output_file_idx; }; } @@ -167,9 +176,14 @@ void InsertCalibrationStatisticsSaverPass::runOnOperation() { } std::unique_ptr> -CreateInsertCalibrationStatisticsSaverPass(StringRef calibration_data_dir) { - return std::make_unique( - calibration_data_dir); +CreateInsertCalibrationStatisticsSaverPass( + StringRef calibration_data_dir, + const std::vector& aggregator_ops_to_ignore) { + InsertCalibrationStatisticsSaverPassOptions options = { + .aggregator_ops_to_ignore_ = aggregator_ops_to_ignore, + .calibration_data_dir_ = calibration_data_dir.str(), + }; + return std::make_unique(options); } } // namespace mlir::quant::stablehlo diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h b/tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h index a6836eaf1009cd..d13c589c2ba890 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h +++ b/tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h @@ -18,6 +18,7 @@ limitations under the License. #include #include +#include #include "absl/status/statusor.h" #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project @@ -46,7 +47,9 @@ CreateLiftQuantizableSpotsAsFunctionsPass( // Creates a pass that inserts CalibrationStatisticsSaverOp. 
std::unique_ptr> -CreateInsertCalibrationStatisticsSaverPass(StringRef calibration_data_dir); +CreateInsertCalibrationStatisticsSaverPass( + StringRef calibration_data_dir, + const std::vector& aggregator_ops_to_ignore); // Adds generated pass default constructors or options definitions. #define GEN_PASS_DECL diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td b/tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td index b61550e5347e36..7661e8d562fbe9 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td +++ b/tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td @@ -228,5 +228,12 @@ def InsertCalibrationStatisticsSaverPass : Pass<"stablehlo-insert-calibration-st `CalibrationStatisticsSaver` op at the end of the function to collect their statistics. }]; + let options = [ + ListOption<"aggregator_ops_to_ignore_", "aggregator-ops-to-ignore", "std::string", + "Ops to ignore when inserting CalibrationStatisticsSaver.">, + Option<"calibration_data_dir_", "calibration-data-dir", + "std::string", /*default=*/"", + "The directory to save calibration data.">, + ]; let dependentDialects = ["TF::TensorFlowDialect"]; } diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/python/BUILD b/tensorflow/compiler/mlir/quantization/stablehlo/python/BUILD index df5252b986adf5..0999d37da524c2 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/python/BUILD +++ b/tensorflow/compiler/mlir/quantization/stablehlo/python/BUILD @@ -30,6 +30,7 @@ package( pytype_strict_library( name = "quantization", srcs = ["quantization.py"], + visibility = ["//visibility:public"], deps = [ ":pywrap_quantization", "//tensorflow/compiler/mlir/quantization/stablehlo:quantization_config_proto_py", @@ -45,6 +46,10 @@ pytype_strict_library( # testonly = 1, # srcs = ["integration_test/quantize_model_test_base.py"], # tags = ["no_pip"], +# visibility = [ +# "//learning/brain/mlir/quantization/stablehlo:__subpackages__", +# "//tensorflow/compiler/mlir/quantization:__subpackages__", +# ], # deps = [ # "//third_party/py/mlir:ir", # "//third_party/py/mlir:stablehlo_dialect", @@ -62,6 +67,7 @@ pytype_strict_library( # "//tensorflow/python/ops:nn_ops", # "//tensorflow/python/ops:variables", # "//tensorflow/python/platform:client_testlib", +# "//tensorflow/python/platform:tf_logging", # "//tensorflow/python/saved_model:load", # "//tensorflow/python/saved_model:loader", # "//tensorflow/python/saved_model:save", diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py b/tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py index 5e9f9955f329f3..ab0fb1d5662bba 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py +++ b/tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py @@ -577,6 +577,114 @@ def data_gen() -> repr_dataset.RepresentativeDataset: 0.65, ) + def test_reuse_calibration_data(self): + _, y_shape, bias_shape, x_signature, y_signature = ( + self._prepare_sample_einsum_datashapes('abc,cde->abde', use_bias=True) + ) + + self._create_einsum_model( + self._input_saved_model_path, + 'abc,cde->abde', + y_shape, + x_signature, + y_signature, + bias_shape, + ) + + # Generate model input data. 
+ rng = np.random.default_rng(seed=42) + input_data = ops.convert_to_tensor( + rng.uniform(low=0.0, high=1.0, size=x_signature).astype('f4') + ) + + def data_gen() -> repr_dataset.RepresentativeDataset: + for _ in range(100): + yield { + 'x': ops.convert_to_tensor( + np.random.uniform(low=0.0, high=1.0, size=x_signature).astype( + 'f4' + ) + ), + } + + dataset_path = self.create_tempfile('tfrecord').full_path + path_map = {'serving_default': dataset_path} + repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( + {'serving_default': data_gen()} + ) + + calibration_data_dir = self.create_tempdir('calibration_data').full_path + config = qc.QuantizationConfig( + static_range_ptq_preset=qc.StaticRangePtqPreset( + representative_datasets=[ + qc.RepresentativeDatasetConfig( + tf_record=qc.TfRecordFile(path=dataset_path) + ) + ] + ), + tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), + calibration_options=qc.CalibrationOptions( + calibration_method=_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX, + calibration_data_dir=calibration_data_dir, + ), + ) + + # Run quantization the first time; calibration is expected to run. + with self.assertLogs(level='INFO') as info_logs: + quantization.quantize_saved_model( + self._input_saved_model_path, + self._output_saved_model_path, + config, + ) + self.assertTrue( + self._any_log_contains( + 'Calibration step is executed in graph mode.', + info_logs.records, + ) + ) + module_str = self._extract_first_xla_call_module_op( + self._output_saved_model_path + ) + self.assertTrue( + re.search('stablehlo.dot_general.*xi8>.*xi8>.*xi32>', module_str) + ) + + # Run quantization the second time; calibration is expected to be skipped. + output_saved_model_path_2 = self.create_tempdir('output2').full_path + with self.assertLogs(level='INFO') as info_logs: + quantization.quantize_saved_model( + self._input_saved_model_path, + output_saved_model_path_2, + config, + ) + self.assertFalse( + self._any_log_contains( + 'Calibration step is executed in graph mode.', + info_logs.records, + ) + ) + module_str = self._extract_first_xla_call_module_op( + output_saved_model_path_2 + ) + self.assertTrue( + re.search('stablehlo.dot_general.*xi8>.*xi8>.*xi32>', module_str) + ) + + # Expect both quantized models to produce the same results.
+ root = load.load(self._output_saved_model_path) + self.assertCountEqual(root.signatures.keys(), {'serving_default'}) + new_outputs_1 = root.signatures['serving_default']( + x=ops.convert_to_tensor(input_data) + ) + + root = load.load(output_saved_model_path_2) + self.assertCountEqual(root.signatures.keys(), {'serving_default'}) + new_outputs_2 = root.signatures['serving_default']( + x=ops.convert_to_tensor(input_data) + ) + + self.assertAllClose(new_outputs_1, new_outputs_2) + @parameterized.named_parameters( ('use_constant_with_int32_input', np.int32, False), ('use_variable_with_int32_input', np.int32, True), diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py b/tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py index 31c53a4cf20fe9..fef1784fec9370 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py +++ b/tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py @@ -33,11 +33,13 @@ from tensorflow.python.ops import nn_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test +from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import load from tensorflow.python.saved_model import loader_impl from tensorflow.python.saved_model import save as saved_model_save from tensorflow.python.types import core + FUNC_ALIAS = 'some_alias' @@ -164,6 +166,27 @@ def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]: ) return model + def _any_log_contains( + self, substring: str, log_record_list: List['logging.LogRecord'] + ) -> bool: + """Returns True if any of the log contains a given substring. + + Args: + substring: A piece of string to check whether it exists in the log + message. + log_record_list: A list of `absl.logging.LogRecord`s. + + Returns: + True if and only if the substring exists in any of the log in + `log_record_list`. + """ + return any( + map( + lambda log_record: substring in str(log_record.message), + log_record_list, + ) + ) + def _create_matmul_and_same_scale_model( self, input_shape: Sequence[int], diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/python/pywrap_quantization_lib.cc b/tensorflow/compiler/mlir/quantization/stablehlo/python/pywrap_quantization_lib.cc index 3b5ece120bdeb0..517bd117348072 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/python/pywrap_quantization_lib.cc +++ b/tensorflow/compiler/mlir/quantization/stablehlo/python/pywrap_quantization_lib.cc @@ -25,6 +25,7 @@ limitations under the License. 
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h" +#include "tensorflow/core/protobuf/meta_graph.pb.h" namespace stablehlo::quantization::pywrap { diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto b/tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto index f156b66997211d..49e8161df3a749 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto +++ b/tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto @@ -77,8 +77,8 @@ message StaticRangePtqPreset { bool enable_full_int_quantization = 3; } -// Applies int8 per-tensor weight-only post-training quantization for all -// dot_general op. +// Applies int8 per-channel weight-only post-training quantization for all +// dot_general and convolution ops. message WeightOnlyPtqPreset {} // Metadata specific to the input TensorFlow SavedModel, which may be required @@ -278,7 +278,7 @@ message DebuggerConfig { } // Defines various calibration options. -// Next ID: 5 +// Next ID: 6 message CalibrationOptions { // Configurations for calibration methods. // Next ID: 7 @@ -332,8 +332,13 @@ message CalibrationOptions { // representative dataset used to calibrate a function. repeated RepresentativeDatasetConfig representative_datasets = 3; - // The path to save calibration statistics data. + // The path to save calibration statistics data. If not set, use a temporary + // directory. string calibration_data_dir = 4; + + // Whether to reuse the existing calibration data in `calibration_data_dir`. + // Default to False. + bool force_regenerate_calibration_data = 5; } // Quantization configuration for StableHLO Quantizer. 
This is the primary diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir b/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir new file mode 100644 index 00000000000000..97d546afe2b723 --- /dev/null +++ b/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver_with_skipping.mlir @@ -0,0 +1,47 @@ +// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-insert-calibration-statistics-saver='aggregator-ops-to-ignore=skipping_id' | FileCheck %s + +func.func @serving_default(%arg0: tensor<1x3x4x3xf32>) -> (tensor<1x2x2x2xf32>) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_tensor:0", outputs = "PartitionedCall:0"}} { + %cst = "tf.Const"() <{value = dense<[[[[-0.891899645, 0.392044574], [0.77720493, 1.31188095], [0.255048186, 2.700150e+00]], [[-1.08111858, -0.406604826], [-0.298575521, -2.25356531], [-1.00201964, 2.54532099]], [[-1.34911358, 0.279911458], [-0.868258893, -1.36708188], [0.866317451, -2.05804896]]], [[[-0.591397941, 0.331505477], [0.715151429, 2.64073896], [1.27163255, 0.206143498]], [[0.474211812, 1.45044816], [0.119936548, 2.54149938], [-0.939900994, 0.438387245]], [[-1.12486279, -1.09022558], [0.82202208, 1.04652023], [1.30316162, 2.62054276]]]]> : tensor<2x3x3x2xf32>}> : () -> tensor<2x3x3x2xf32> + %output, %min, %max, %histogram = "tf.CustomAggregator"(%arg0) <{calibration_method = 5 : i32, id = "skipping_id", num_bins = 32 : i32, max_percentile = 0.000000e+00 : f32, min_percentile = 0.000000e+00 : f32}> : (tensor<1x3x4x3xf32>) -> (tensor<1x3x4x3xf32>, tensor, tensor, tensor<512xi64>) + %0 = "tf.Conv2D"(%output, %cst) <{data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 2, 2, 1], use_cudnn_on_gpu = true}> {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", device = ""} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x2x2x2xf32> + %output_1, %min_2, %max_3, %histogram_4 = "tf.CustomAggregator"(%0) <{calibration_method = 5 : i32, id = "keeping_id", num_bins = 32 : i32, max_percentile = 0.000000e+00 : f32, min_percentile = 0.000000e+00 : f32}> : (tensor<1x2x2x2xf32>) -> (tensor<1x2x2x2xf32>, tensor, tensor, tensor<512xi64>) + %1 = "tf.Identity"(%output_1) {device = ""} : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32> + return %1 : tensor<1x2x2x2xf32> +} +// CHECK-LABEL: @serving_default +// CHECK: %[[CUSTOM_AGGREGATOR_0:.*]], %[[MIN_O:.*]], %[[MAX_O:.*]], %[[HISTOGRAM_0:.*]] = "tf.CustomAggregator" +// CHECK-SAME: <{calibration_method = 5 : i32, id = "skipping_id", num_bins = 32 : i32, max_percentile = 0.000000e+00 : f32, min_percentile = 0.000000e+00 : f32}> : (tensor<1x3x4x3xf32>) -> (tensor<1x3x4x3xf32>, tensor, tensor, tensor<512xi64>) +// CHECK: %[[CUSTOM_AGGREGATOR_1:.*]], %[[MIN_1:.*]], %[[MAX_1:.*]], %[[HISTOGRAM_1:.*]] = "tf.CustomAggregator" +// CHECK-SAME: <{calibration_method = 5 : i32, id = "keeping_id", num_bins = 32 : i32, max_percentile = 0.000000e+00 : f32, min_percentile = 0.000000e+00 : f32}> : (tensor<1x3x4x3xf32>) -> (tensor<1x3x4x3xf32>, tensor, tensor, tensor<512xi64>) +// CHECK: "tf.CalibrationStatisticsSaver"(%[[MIN_1]], %[[MAX_1]], %[[HISTOGRAM_1]]) +// CHECK-SAME: <{calibration_methods = [5 : i32], ids = ["keeping_id"], output_file_path = "serving_default_0.pb"}> : (tensor, tensor, tensor<512xi64>) -> () +// CHECK: return + +// ----- +
+module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 1836 : i32}, tf_saved_model.semantics} { + func.func @main(%arg0: tensor<10x1x1024xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<10x1x3xf32> {tf_saved_model.index_path = ["output"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_input_tensor:0", outputs = "PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} { + %cst = stablehlo.constant dense<0.000000e+00>: tensor<10x1024x3xf32> + %output, %min, %max, %histogram = "tf.CustomAggregator"(%arg0) <{calibration_method = 1 : i32, id = "skipping_id", max_percentile = 0.000000e+00 : f32, min_percentile = 0.000000e+00 : f32, num_bins = 0 : i32}> : (tensor<10x1x1024xf32>) -> (tensor<10x1x1024xf32>, tensor, tensor, tensor<0xi64>) + %0 = "tf.XlaCallModule"(%output, %cst) <{Sout = [#tf_type.shape<10x1x3>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_with_relu_fn_1, _original_entry_function = "composite_dot_general_with_relu_fn_1", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}, _tfl_quant_trait = "fully_quantizable"} : (tensor<10x1x1024xf32>, tensor<10x1024x3xf32>) -> tensor<10x1x3xf32> + %output_0, %min_1, %max_2, %histogram_3 = "tf.CustomAggregator"(%0) <{calibration_method = 1 : i32, id = "keeping_id", max_percentile = 0.000000e+00 : f32, min_percentile = 0.000000e+00 : f32, num_bins = 0 : i32}> : (tensor<10x1x3xf32>) -> (tensor<10x1x3xf32>, tensor, tensor, tensor<0xi64>) + return %output_0 : tensor<10x1x3xf32> + } + // CHECK-LABEL: @main + // CHECK: %[[CUSTOM_AGGREGATOR_0:.*]], %[[MIN_O:.*]], %[[MAX_O:.*]], %[[HISTOGRAM_0:.*]] = "tf.CustomAggregator" + // CHECK-SAME: <{calibration_method = 1 : i32, id = "skipping_id", max_percentile = 0.000000e+00 : f32, min_percentile = 0.000000e+00 : f32, num_bins = 0 : i32}> + // CHECK: %[[CUSTOM_AGGREGATOR_1:.*]], %[[MIN_1:.*]], %[[MAX_1:.*]], %[[HISTOGRAM_1:.*]] = "tf.CustomAggregator" + // CHECK-SAME: <{calibration_method = 1 : i32, id = "keeping_id", max_percentile = 0.000000e+00 : f32, min_percentile = 0.000000e+00 : f32, num_bins = 0 : i32}> + // CHECK: "tf.CalibrationStatisticsSaver"(%[[MIN_1]], %[[MAX_1]], %[[HISTOGRAM_1]]) + // CHECK-SAME: <{calibration_methods = [1 : i32], ids = ["keeping_id"], output_file_path = "main_0.pb"}> : (tensor, tensor, tensor<0xi64>) -> () + // CHECK: return + + func.func private @composite_dot_general_with_relu_fn_1(%arg0: tensor<10x1x1024xf32>, %arg1: tensor<10x1024x3xf32>) -> tensor<10x1x3xf32> attributes {_from_xla_call_module, tf_quant.composite_function} { + %cst = stablehlo.constant dense<0.000000e+00> : tensor<10x1x3xf32> + %0 = stablehlo.dot_general %arg0, %arg1, batching_dims = [0] x [0], contracting_dims = [2] x [1], precision = [DEFAULT, DEFAULT] {mhlo.frontend_attributes = {grad_x = "false", grad_y = "false"}} : (tensor<10x1x1024xf32>, tensor<10x1024x3xf32>) -> tensor<10x1x3xf32> + %1 = stablehlo.maximum %0, %cst : tensor<10x1x3xf32> + return %1 : tensor<10x1x3xf32> + } + // CHECK-LABEL: func.func private @composite_dot_general_with_relu_fn_1 + // CHECK-NOT: "tf.CalibrationStatisticsSaver" +} \ No newline at end of file diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_same_scale.mlir
b/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_same_scale.mlir index 25aab3044a3496..7a905dfbe58a9e 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_same_scale.mlir +++ b/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_same_scale.mlir @@ -291,17 +291,17 @@ module attributes {tf_saved_model.semantics} { // CHECK-SAME: %[[ARG2:.*]]: tensor<2x3x2xi64> func.func private @composite_and_gather(%arg0: tensor<3x4x5xf32>, %arg1: tensor<3x5x2xf32>, %arg2: tensor<2x3x2xi64>) -> tensor<2x3x2x2xf32> { // CHECK: %[[Q1:.*]] = "quantfork.qcast"(%[[ARG0]]) {volatile} : (tensor<3x4x5xf32>) -> tensor<3x4x5x!quant.uniform> - // CHECK: %[[Q2:.*]] = "quantfork.qcast"(%[[ARG1]]) {volatile} : (tensor<3x5x2xf32>) -> tensor<3x5x2x!quant.uniform:f32, 6.000000e-03:13>> + // CHECK: %[[Q2:.*]] = "quantfork.qcast"(%[[ARG1]]) {volatile} : (tensor<3x5x2xf32>) -> tensor<3x5x2x!quant.uniform:f32, 6.000000e-03>> // CHECK: %[[CALL:.*]] = call @quantized_dot_general_fn_1(%[[Q1]], %[[Q2]]) - // CHECK-SAME: (tensor<3x4x5x!quant.uniform>, tensor<3x5x2x!quant.uniform:f32, 6.000000e-03:13>>) -> tensor<3x4x2x!quant.uniform> + // CHECK-SAME: (tensor<3x4x5x!quant.uniform>, tensor<3x5x2x!quant.uniform:f32, 6.000000e-03>>) -> tensor<3x4x2x!quant.uniform> // CHECK: %[[GATHER:.*]] = "stablehlo.gather"(%[[CALL]], %[[ARG2]]) // CHECK-SAME: (tensor<3x4x2x!quant.uniform>, tensor<2x3x2xi64>) -> tensor<2x3x2x2x!quant.uniform> // CHECK: %[[DQ:.*]] = "quantfork.dcast"(%[[GATHER]]) : (tensor<2x3x2x2x!quant.uniform>) -> tensor<2x3x2x2xf32> // CHECK: return %[[DQ]] %0 = "quantfork.qcast"(%arg0) {volatile} : (tensor<3x4x5xf32>) -> tensor<3x4x5x!quant.uniform> %1 = "quantfork.dcast"(%0) : (tensor<3x4x5x!quant.uniform>) -> tensor<3x4x5xf32> - %2 = "quantfork.qcast"(%arg1) {volatile} : (tensor<3x5x2xf32>) -> tensor<3x5x2x!quant.uniform:f32, 6.000000e-03:13>> - %3 = "quantfork.dcast"(%2) : (tensor<3x5x2x!quant.uniform:f32, 6.000000e-03:13>>) -> tensor<3x5x2xf32> + %2 = "quantfork.qcast"(%arg1) {volatile} : (tensor<3x5x2xf32>) -> tensor<3x5x2x!quant.uniform:f32, 6.000000e-03>> + %3 = "quantfork.dcast"(%2) : (tensor<3x5x2x!quant.uniform:f32, 6.000000e-03>>) -> tensor<3x5x2xf32> %4 = "tf.XlaCallModule"(%1, %3) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _quantization_method = "static_range_ptq {}", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<3x4x5xf32>, tensor<3x5x2xf32>) -> tensor<3x4x2xf32> %5 = "quantfork.qcast"(%4) {volatile} : (tensor<3x4x2xf32>) -> tensor<3x4x2x!quant.uniform> %6 = "quantfork.dcast"(%5) : (tensor<3x4x2x!quant.uniform>) -> tensor<3x4x2xf32> @@ -321,10 +321,10 @@ module attributes {tf_saved_model.semantics} { // CHECK: quantized_dot_general_fn_1 // CHECK-SAME: %[[ARG2:.*]]: tensor<3x4x5x!quant.uniform> - // CHECK-SAME: %[[ARG3:.*]]: tensor<3x5x2x!quant.uniform:f32, 6.000000e-03:13>> + // CHECK-SAME: %[[ARG3:.*]]: tensor<3x5x2x!quant.uniform:f32, 6.000000e-03>> func.func private @composite_dot_general_fn_1(%arg0: tensor<3x4x5xf32>, %arg1: tensor<3x5x2xf32>) -> tensor<3x4x2xf32> attributes {_from_xla_call_module} { // CHECK: %[[DOT:.*]] = stablehlo.dot_general %[[ARG2]], %[[ARG3]] - // CHECK-SAME: (tensor<3x4x5x!quant.uniform>, tensor<3x5x2x!quant.uniform:f32, 
6.000000e-03:13>>) -> tensor<3x4x2x!quant.uniform> + // CHECK-SAME: (tensor<3x4x5x!quant.uniform>, tensor<3x5x2x!quant.uniform:f32, 6.000000e-03>>) -> tensor<3x4x2x!quant.uniform> // CHECK: %[[Q3:.*]] = stablehlo.uniform_quantize %0 : (tensor<3x4x2x!quant.uniform>) -> tensor<3x4x2x!quant.uniform> // CHECK: return %[[Q3]] %0 = stablehlo.dot_general %arg0, %arg1, batching_dims = [0] x [0], contracting_dims = [2] x [1] : (tensor<3x4x5xf32>, tensor<3x5x2xf32>) -> tensor<3x4x2xf32> diff --git a/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_weight_only.mlir b/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_weight_only.mlir index 78a48a2d9373e1..e152a90ce72c3a 100644 --- a/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_weight_only.mlir +++ b/tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_weight_only.mlir @@ -6,8 +6,8 @@ module attributes {tf_saved_model.semantics} { func.func private @quantize_dot_general_fn(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> attributes {tf._original_func_name = "main_0"} { %cst = stablehlo.constant dense<3.000000e-01> : tensor<2x3xf32> - %0 = "quantfork.qcast"(%cst) : (tensor<2x3xf32>) -> tensor<2x3x!quant.uniform> - %1 = "quantfork.dcast"(%0) : (tensor<2x3x!quant.uniform>) -> tensor<2x3xf32> + %0 = "quantfork.qcast"(%cst) : (tensor<2x3xf32>) -> tensor<2x3x!quant.uniform> + %1 = "quantfork.dcast"(%0) : (tensor<2x3x!quant.uniform>) -> tensor<2x3xf32> %2 = "tf.XlaCallModule"(%arg0, %1) <{Sout = [#tf_type.shape<1x3>], dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64}> {_entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "weight_only_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = ""} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32> return %2 : tensor<1x3xf32> } @@ -21,15 +21,15 @@ module attributes {tf_saved_model.semantics} { // CHECK-LABEL: quantize_dot_general_fn // CHECK-SAME: %[[ARG0:.+]]: tensor<1x2xf32> // CHECK: %[[CST:.+]] = stablehlo.constant dense<3.000000e-01> : tensor<2x3xf32> -// CHECK: %[[Q:.+]] = "quantfork.qcast"(%[[CST]]) : (tensor<2x3xf32>) -> tensor<2x3x!quant.uniform> +// CHECK: %[[Q:.+]] = "quantfork.qcast"(%[[CST]]) : (tensor<2x3xf32>) -> tensor<2x3x!quant.uniform> // CHECK: %[[CALL:.+]] = call @quantized_dot_general_fn(%[[ARG0]], %[[Q]]) -// CHECK-SAME: {_quantization_method = "weight_only_ptq { }"} : (tensor<1x2xf32>, tensor<2x3x!quant.uniform>) -> tensor<1x3xf32> +// CHECK-SAME: {_quantization_method = "weight_only_ptq { }"} : (tensor<1x2xf32>, tensor<2x3x!quant.uniform>) -> tensor<1x3xf32> // CHECK: return %[[CALL]] // CHECK: quantized_dot_general_fn -// CHECK-SAME: (%[[ARG1:.+]]: tensor<1x2xf32>, %[[ARG2:.+]]: tensor<2x3x!quant.uniform>) -> tensor<1x3xf32> +// CHECK-SAME: (%[[ARG1:.+]]: tensor<1x2xf32>, %[[ARG2:.+]]: tensor<2x3x!quant.uniform>) -> tensor<1x3xf32> // CHECK: %[[DOT:.+]] = stablehlo.dot_general %[[ARG1]], %[[ARG2]] -// CHECK-SAME: (tensor<1x2xf32>, tensor<2x3x!quant.uniform>) -> tensor<1x3xf32> +// CHECK-SAME: (tensor<1x2xf32>, tensor<2x3x!quant.uniform>) -> tensor<1x3xf32> // CHECK: return %[[DOT]] // ----- diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.cc b/tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.cc index 565adebfe52300..64695d6719885d 100644 --- 
a/tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.cc +++ b/tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.cc @@ -71,7 +71,7 @@ LogicalResult FoldOperation(OpBuilder& builder, Operation* op, bool IsOperationFoldable(Operation* op) { if (isa(op)) return true; - if (!op->getDialect()->getNamespace().equals("tf") || !TF::CanBeFolded(op)) { + if (op->getDialect()->getNamespace() != "tf" || !TF::CanBeFolded(op)) { return false; } diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc index 470d134d527a0f..c5d7ca8e47f6f9 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc +++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc @@ -69,9 +69,9 @@ bool IsCallToQuantizableLiftedFunction(Operation *op) { TF::PartitionedCallOp call_op = dyn_cast_or_null(op); return call_op && call_op->hasAttrOfType(kQuantTraitAttrName) && - call_op->getAttrOfType(kQuantTraitAttrName) - .getValue() - .equals(QuantTraitValues[QuantizationTrait::FullyQuantizable]); + call_op->getAttrOfType(kQuantTraitAttrName).getValue() == + llvm::StringRef( + QuantTraitValues[QuantizationTrait::FullyQuantizable]); } // Returns the composite function name. diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_hashtable_ops_as_args.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_hashtable_ops_as_args.cc index f48d15dd81cbdb..18ee96bfe9422e 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_hashtable_ops_as_args.cc +++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_hashtable_ops_as_args.cc @@ -70,14 +70,15 @@ bool IsHashTableOp(Operation* op) { // Checks if the function is the main or initializer function. 
bool IsMainOrInitializerFunction(ModuleOp module, func::FuncOp func) { - if (func.getSymName().equals(tensorflow::kImportModelDefaultGraphFuncName) || - func.getSymName().equals(kTfQuantSaveFuncName)) { + if (func.getSymName() == + llvm::StringRef(tensorflow::kImportModelDefaultGraphFuncName) || + func.getSymName() == kTfQuantSaveFuncName) { return true; } for (func::FuncOp init_func : tf_saved_model::GetInitializerFunctions(module)) { - if (func.getSymName().equals(init_func.getSymName())) { + if (func.getSymName() == init_func.getSymName()) { return true; } } @@ -118,7 +119,7 @@ bool IsResourceInitialized(ModuleOp module_op, Operation* hash_table) { tf_saved_model::GetInitializerFunctions(module_op)) { for (Operation& op : init_func_op.getBody().getOps()) { StringRef other_shared_name = GetSharedName(&op); - if (IsHashTableOp(&op) && other_shared_name.equals(shared_name)) { + if (IsHashTableOp(&op) && other_shared_name == shared_name) { return true; } } diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc index b0a84d71c84182..a87245345f6987 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc +++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc @@ -157,8 +157,8 @@ LogicalResult MatchSupportedAffineOp(Operation* op, Value& binding_output, bool is_supported_affine_op = false; if (llvm::isa(op)) { if (const auto data_format = op->getAttrOfType("data_format")) { - is_supported_affine_op = data_format.getValue().equals("NHWC") || - data_format.getValue().equals("NDHWC"); + is_supported_affine_op = + data_format.getValue() == "NHWC" || data_format.getValue() == "NDHWC"; } } else if (llvm::isa(op)) { if (const auto adj_y = op->getAttrOfType("adj_y")) { diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/python/BUILD b/tensorflow/compiler/mlir/quantization/tensorflow/python/BUILD index d8767cca744fa2..2ae799cca85968 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow/python/BUILD +++ b/tensorflow/compiler/mlir/quantization/tensorflow/python/BUILD @@ -45,6 +45,7 @@ cc_library( "//tensorflow/compiler/mlir/quantization/stablehlo/cc:saved_model_export", "//tensorflow/compiler/mlir/quantization/stablehlo/cc:saved_model_import", "//tensorflow/compiler/mlir/quantization/stablehlo/cc:types", + "//tensorflow/compiler/mlir/quantization/stablehlo/cc:weight_only_ptq", "//tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration:component", "//tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration:statistics", "//tensorflow/compiler/mlir/quantization/tensorflow:exported_model_proto_cc", diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py b/tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py index e9c18a66943ff9..b44c788bc10f7b 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py +++ b/tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py @@ -2710,6 +2710,116 @@ def data_gen() -> repr_dataset.RepresentativeDataset: self.assertAllClose(new_outputs, got_outputs, atol=0.097) self.assertAllClose(new_outputs, expected_outputs, atol=0.057) + def test_reuse_calibration_data(self): + model = self._create_simple_gather_and_conv_model( + dtypes.int32, filter_shape=(2, 3, 3, 1024) + ) + saved_model_save.save(model, 
self._input_saved_model_path) + + data_gen = self._create_data_generator( + input_key='input_tensor', + shape=[50], + minval=0, + maxval=64, + dtype=dtypes.int32, + ) + + tags = {tag_constants.SERVING} + + calibration_data_dir = self.create_tempdir('calibration_data').full_path + quantization_options = quant_opts_pb2.QuantizationOptions( + quantization_method=quant_opts_pb2.QuantizationMethod( + preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8 + ), + tags=tags, + signature_keys=['serving_default'], + op_set=quant_opts_pb2.XLA, + force_graph_mode_calibration=True, + calibration_options=stablehlo_quant_config_pb2.CalibrationOptions( + calibration_method=_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX, + calibration_data_dir=calibration_data_dir, + ), + ) + + # Run quantization the first time, calibration is expected to be run. + with self.assertLogs(level='INFO') as info_logs: + # Save the logger verbosity. + prev_log_level = logging.get_verbosity() + logging.set_verbosity(logging.INFO) + try: + converted_model1 = quantize_model.quantize( + self._input_saved_model_path, + self._output_saved_model_path, + quantization_options, + representative_dataset=data_gen, + ) + finally: + # Restore the logger verbosity. + logging.set_verbosity(prev_log_level) + + self.assertNotEmpty(info_logs.records) + self.assertTrue( + self._any_log_contains( + 'Calibration step is executed in graph mode.', + info_logs.records, + ) + ) + self.assertIsNotNone(converted_model1) + self.assertCountEqual( + converted_model1.signatures._signatures.keys(), {'serving_default'} + ) + + output_loader = saved_model_loader.SavedModelLoader( + self._output_saved_model_path + ) + output_graphdef = output_loader.get_meta_graph_def_from_tags( + tags + ).graph_def + self.assertTrue(self._contains_op(output_graphdef, 'XlaConvV2')) + + # Run quantization the second time, calibration is expected to be skipped. + with self.assertLogs(level='INFO') as info_logs: + # Save the logger verbosity. + prev_log_level = logging.get_verbosity() + logging.set_verbosity(logging.INFO) + try: + converted_model2 = quantize_model.quantize( + self._input_saved_model_path, + self._output_saved_model_path, + quantization_options, + representative_dataset=data_gen, + overwrite_output_directory=True, + ) + finally: + # Restore the logger verbosity. + logging.set_verbosity(prev_log_level) + + self.assertNotEmpty(info_logs.records) + self.assertFalse( + self._any_log_contains( + 'Calibration step is executed in graph mode.', + info_logs.records, + ) + ) + self.assertIsNotNone(converted_model2) + self.assertCountEqual( + converted_model2.signatures._signatures.keys(), {'serving_default'} + ) + + # Expect the two models to produce the same results.
+ test_data = ops.convert_to_tensor( + np.random.uniform(low=0, high=64, size=(32)).astype( + dtypes.int32.as_numpy_dtype + ) + ) + new_outputs_1 = converted_model1.signatures['serving_default']( + input_tensor=test_data + )['output'] + new_outputs_2 = converted_model2.signatures['serving_default']( + input_tensor=test_data + )['output'] + self.assertAllClose(new_outputs_1, new_outputs_2) + @test_util.run_in_graph_and_eager_modes def test_function_alias_preserved(self): model = self._create_conv2d_model( @@ -5391,6 +5501,7 @@ def test_einsum_model( @parameterized.named_parameters( ('to_xla_per_tensor', quant_opts_pb2.XLA, False), + ('stablehlo_per_channel', quant_opts_pb2.STABLEHLO, True), ) @test_util.run_in_graph_and_eager_modes def test_matmul_model( @@ -5432,8 +5543,14 @@ def test_matmul_model( ) output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def + if target_opset == quant_opts_pb2.XLA: + self.assertTrue(self._contains_op(output_graphdef, 'XlaDotV2')) + elif target_opset == quant_opts_pb2.STABLEHLO: + # This is to verify the invocation of StableHLO quantizer works. More + # thorough functional tests are in StableHLO quantizer directory. + self.assertTrue(self._contains_op(output_graphdef, 'XlaCallModule')) + # Due to other meta data, the compression is not exactly 1/4. - self.assertTrue(self._contains_op(output_graphdef, 'XlaDotV2')) self.assertLess( testing.get_size_ratio( self._output_saved_model_path, self._input_saved_model_path @@ -5443,6 +5560,7 @@ def test_matmul_model( @parameterized.named_parameters( ('to_xla_per_tensor', quant_opts_pb2.XLA, False), + ('stablehlo_per_channel', quant_opts_pb2.STABLEHLO, True), # TODO: b/289761265 - [Converter Component][TF-Quantizer] Improve Weight- # only Quantization # Enable this back once new weight-only quantizer is supported for per- @@ -5502,7 +5620,7 @@ def test_conv_model( 0.3, ) - if enable_per_channel_quantization: + if enable_per_channel_quantization and target_opset == quant_opts_pb2.XLA: per_channel_size_attr = attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue( shape=[ @@ -5521,6 +5639,12 @@ def test_conv_model( output_graphdef, 'Const', '_output_shapes', per_channel_size_attr ) ) + if target_opset == quant_opts_pb2.XLA: + self.assertTrue(self._contains_op(output_graphdef, 'XlaConvV2')) + elif target_opset == quant_opts_pb2.STABLEHLO: + # This is to verify the invocation of StableHLO quantizer works. More + # thorough functional tests are in StableHLO quantizer directory. + self.assertTrue(self._contains_op(output_graphdef, 'XlaCallModule')) input_tensor = array_ops.constant( np.random.uniform(low=0, high=0.1, size=input_shape), diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc b/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc index e055d9dda95bbb..e38310879184ef 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc +++ b/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc @@ -47,6 +47,7 @@ limitations under the License. 
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h" +#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h" #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/run_passes.h" @@ -80,10 +81,12 @@ using ::mlir::quant::stablehlo::PostCalibrationComponent; using ::mlir::quant::stablehlo::PreCalibrationComponent; using ::mlir::quant::stablehlo::RunCalibrationPasses; using ::mlir::quant::stablehlo::UpdateFunctionAliases; +using ::mlir::quant::stablehlo::WeightOnlyPtqComponent; using ::stablehlo::quantization::AddCalibrationStatistics; using ::stablehlo::quantization::ChangeToQuantizedFilename; using ::stablehlo::quantization::DebuggerConfig; using ::stablehlo::quantization::ExpandPresets; +using ::stablehlo::quantization::IsCalibrationRequired; using ::stablehlo::quantization::PopulateDefaults; using ::stablehlo::quantization::QuantizationConfig; using ::stablehlo::quantization::io::CreateTmpDir; @@ -163,7 +166,10 @@ absl::StatusOr ExportCalibrationModel( mlir::OwningOpRef cloned_module_ref(module_op.clone()); TF_RETURN_IF_ERROR( - RunCalibrationPasses(*cloned_module_ref, *context, calibration_data_dir)); + RunCalibrationPasses(*cloned_module_ref, *context, calibration_data_dir, + quantization_options.calibration_options() + .force_regenerate_calibration_data())); + if (!IsCalibrationRequired(*cloned_module_ref)) return ExportedModel(); absl::StatusOr exported_model = ModuleOpToExportedModel( *cloned_module_ref, context, kTfQuantPtqPreCalibrationStepName, @@ -219,6 +225,20 @@ QuantizationConfig GetQuantizationConfigForStaticRangePtq( return ExpandPresets(PopulateDefaults(quantization_config)); } +QuantizationConfig GetQuantizationConfigForWeightOnlyPtq( + const QuantizationOptions &quantization_options) { + QuantizationConfig quantization_config{}; + quantization_config.mutable_weight_only_ptq_preset(); + // When targeting server TPUs quantized types should be unpacked into + // integer ops. 
+ quantization_config.mutable_pipeline_config()->set_unpack_quantized_types( + true); + *quantization_config.mutable_debugger_config() = + quantization_options.debugger_config(); + + return ExpandPresets(PopulateDefaults(quantization_config)); +} + absl::StatusOr QuantizePtqModelPreCalibrationImpl( mlir::ModuleOp module_op, mlir::MLIRContext *context, const QuantizationOptions &quantization_options, @@ -381,6 +401,7 @@ absl::StatusOr QuantizeWeightOnly( "Failed to get function alias: ", function_aliases.status().message())); } + const bool is_stablehlo = quantization_options.op_set() == OpSet::STABLEHLO; absl::StatusOr> module = ImportAndPreprocessSavedModel( saved_model_path, @@ -388,7 +409,8 @@ absl::StatusOr QuantizeWeightOnly( quantization_options.signature_keys().end()}, {quantization_options.tags().begin(), quantization_options.tags().end()}, - context.get(), /*is_inliner_run=*/true, /*run_tf_to_stablehlo=*/false, + context.get(), /*is_inliner_run=*/true, + /*run_tf_to_stablehlo=*/is_stablehlo, /*deserialize_xla_call_module=*/false, *function_aliases); if (!module.status().ok()) { return absl::InternalError( @@ -397,14 +419,24 @@ absl::StatusOr QuantizeWeightOnly( } mlir::OwningOpRef module_ref = std::move(module).value(); - TF_RETURN_IF_ERROR(RunPasses( - kTfQuantWeightOnlyStepName, - /*add_passes_func=*/ - [&quantization_options](mlir::PassManager &pm) { - AddQuantizeWeightOnlyPasses(pm, quantization_options, - kTfQuantWeightOnlyStepName); - }, - *context, *module_ref)); + // Use StableHLO Quantizer option if opset is specified. + if (is_stablehlo) { + const QuantizationConfig quantization_config = + GetQuantizationConfigForWeightOnlyPtq(quantization_options); + + WeightOnlyPtqComponent weight_only_ptq_component(context.get()); + TF_ASSIGN_OR_RETURN(*module_ref, weight_only_ptq_component.Run( + *module_ref, quantization_config)); + } else { + TF_RETURN_IF_ERROR(RunPasses( + kTfQuantWeightOnlyStepName, + /*add_passes_func=*/ + [&quantization_options](mlir::PassManager &pm) { + AddQuantizeWeightOnlyPasses(pm, quantization_options, + kTfQuantWeightOnlyStepName); + }, + *context, *module_ref)); + } return ModuleOpToExportedModel( *module_ref, context.get(), kTfQuantWeightOnlyStepName, @@ -457,16 +489,18 @@ absl::StatusOr QuantizeStaticRangePtq( *function_aliases, calibration_data_dir)); // Save and run the calibration model. 
- TF_ASSIGN_OR_RETURN(std::string precalibrated_saved_model_dir, - CreateTmpDir()); - py_function_library.SaveExportedModel( - precalibrated_saved_model_dir, calibration_exported_model, - saved_model_path, tags, signature_def_map); - - py_function_library.RunCalibration( - precalibrated_saved_model_dir, signature_keys, tags, - quantization_options.force_graph_mode_calibration(), - representative_dataset_file_map_serialized); + if (calibration_exported_model.has_graph_def()) { + TF_ASSIGN_OR_RETURN(std::string calibration_saved_model_dir, + CreateTmpDir()); + py_function_library.SaveExportedModel( + calibration_saved_model_dir, calibration_exported_model, + saved_model_path, tags, signature_def_map); + + py_function_library.RunCalibration( + calibration_saved_model_dir, signature_keys, tags, + quantization_options.force_graph_mode_calibration(), + representative_dataset_file_map_serialized); + } if (absl::Status status = AddCalibrationStatistics( *module_ref, calibration_data_dir, diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py b/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py index 92ee947c1c3c2c..f7dec2d2a5dee7 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py +++ b/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py @@ -638,7 +638,7 @@ def _populate_calibration_options( if calib_opts.calibration_data_dir: save_model.create_empty_output_dir( calib_opts.calibration_data_dir, - overwrite=True, + overwrite=calib_opts.force_regenerate_calibration_data, ) @@ -741,10 +741,12 @@ def _populate_quantization_options_default_values( if (quantization_options.op_set == quant_opts_pb2.OpSet.STABLEHLO) and ( quantization_options.quantization_method.preset_method != _PresetMethod.METHOD_STATIC_RANGE_INT8 + and quantization_options.quantization_method.preset_method + != _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8 ): raise ValueError( 'StableHLO quantized opset currently only supports static range' - ' quantization via TF Quantizer.' + ' quantization and weight-only quantization via TF Quantizer.'
) # Set `force_graph_mode_calibration` to True to avoid skipping op execution, diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc b/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc index 7054f049d1369c..b22726de30aeaa 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc +++ b/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc @@ -160,7 +160,7 @@ Value CalculatePaddingAndPadIfNeeded(OpBuilder &builder, Location loc, bool has_dynamic_spatial_dim = absl::c_any_of( spatial_dims, [&input_shape](int64_t dim) { return input_shape.isDynamicDim(dim); }); - if (conv_padding.strref().equals("SAME") && has_dynamic_spatial_dim) { + if (conv_padding.strref() == "SAME" && has_dynamic_spatial_dim) { return PadForDynamicShapedInputSamePadding( builder, loc, input, filter, input_zp_value, strides, dilations, conv_padding, padding, num_dims); @@ -168,7 +168,7 @@ Value CalculatePaddingAndPadIfNeeded(OpBuilder &builder, Location loc, ShapedType filter_shape = mlir::cast(filter.getType()); SmallVector padding_values(2 * num_dims, 0); - if (conv_padding.strref().equals("EXPLICIT")) { + if (conv_padding.strref() == "EXPLICIT") { if (explicit_paddings.size() != 2 * num_dims) { emitError(loc, absl::StrFormat( @@ -182,7 +182,7 @@ Value CalculatePaddingAndPadIfNeeded(OpBuilder &builder, Location loc, padding_values[2 * i + 1] = mlir::cast(explicit_paddings[2 * i + 1]).getInt(); } - } else if (conv_padding.strref().equals("SAME")) { + } else if (conv_padding.strref() == "SAME") { for (int i : spatial_dims) { int input_size = input_shape.getDimSize(i); int filter_size = filter_shape.getDimSize(i - 1); diff --git a/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/BUILD b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/BUILD index 922fc87b9d8ddf..ecc3c9e7ca6ebe 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/BUILD +++ b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/BUILD @@ -6,12 +6,10 @@ load("//tensorflow/compiler/mlir/quantization/stablehlo:internal_visibility_allo package_group( name = "internal_visibility_allowlist_package", packages = [ - "//learning/brain/mlir/quantization/stablehlo/python/integration_test/...", "//tensorflow/compiler/mlir/lite/...", "//tensorflow/compiler/mlir/quantization/...", "//tensorflow/compiler/mlir/tf2xla/transforms/...", "//tensorflow/lite/...", - "//third_party/cloud_tpu/inference_converter/...", # TPU Inference Converter V1 ] + internal_visibility_allowlist(), ) @@ -80,7 +78,6 @@ tf_cc_binary( glob_lit_tests( name = "all_tests", data = [":test_utilities"], - # TODO: b/288344501 - Enable OSS tests again when stable-quant-opt works well. default_tags = [ "no_oss", "no_pip", diff --git a/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/README.md b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/README.md index 8a6fd0f5105c88..a65de3c38df001 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/README.md +++ b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/README.md @@ -1,13 +1,11 @@ -## Tensorflow SavedModel to StableHLO (tf-to-stablehlo-translate) +# Tensorflow SavedModel to StableHLO (tf-to-stablehlo-translate) -### Description - -This tool converts TensorFlow models (SavedModel or MLIR module) to StableHLO -MLIR modules, preserving model structure and signatures. 
It enables seamless +Converts TensorFlow models (SavedModel or MLIR module) to StableHLO MLIR +modules, preserving model structure and signatures. It enables seamless integration of TensorFlow models into MLIR-based compiler frameworks for further optimization and deployment. -### Usage +## C++ APIs ```bash tf-to-stablehlo-translate \ @@ -58,3 +56,68 @@ tf-to-stablehlo-translate --input-arg-shapes=1,12:1,12:1,12 * TensorFlow * MLIR * Abseil (absl) + +## Python APIs + + +### `savedmodel_to_stablehlo` + +Converts a TensorFlow SavedModel into StableHLO bytecode. + +```Python +from tensorflow.compiler.mlir.quantization.tensorflow_to_stablehlo.python import pywrap_tensorflow_to_stablehlo as tf2shlo + +stablehlo_bytes = tf2shlo.savedmodel_to_stablehlo( + input_path="/path/to/your/savedmodel", + exported_model_signatures=["serving_default"], + tag_names=["serve"], + input_arg_shapes_str="1,28,28,3::32" +) + +``` + +#### Arguments: + +* `input_path` (required): Path to your SavedModel directory. +* `exported_model_signatures` (optional): List of signature names to convert. + Defaults to ["serving_default"]. +* `tag_names` (optional): List of tags associated with the SavedModel. Defaults + to ["serve"]. +* `input_arg_shapes_str` (optional): A string representation of input argument + shapes for 'main' entry-point, separating + tensors with ':', dimension with ',', and + using '?' for unknown sizes. For example, + `input-arg-shapes=1,2::1,?` expresses + argument shapes `[1,2], [] and [1,?]`. + +#### Error Handling + +An exception will be raised with details about the error. + +### `tensorflow_module_to_stablehlo` + +Converts a TensorFlow MLIR module string into StableHLO bytecode. + +```Python +from tensorflow.compiler.mlir.quantization.tensorflow_to_stablehlo.python import pywrap_tensorflow_to_stablehlo as tf2shlo + +stablehlo_bytes = tf2shlo.tensorflow_module_to_stablehlo( + module_op_str="your_tensorflow_mlir_module_string", + input_arg_shapes_str="1,28,28,3::32" +) +``` + +#### Arguments: + +* `module_op_str` (required): String containing the TensorFlow MLIR module. +* `input_arg_shapes_str` (optional): A string representation of input argument + shapes for 'main' entry-point, separating + tensors with ':', dimension with ',', and + using '?' for unknown sizes. For example, + `input-arg-shapes=1,2::1,?` expresses + argument shapes `[1,2], [] and [1,?]`. + +#### Error Handling + +Return `py::none()` (equivalent to Python's `None`) if there's an error. An +exception will be raised with details about the error. diff --git a/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/BUILD b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/BUILD index 86066ca5e6b6c9..f7a1c77026d215 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/BUILD +++ b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/BUILD @@ -1,5 +1,6 @@ load( "//tensorflow:tensorflow.default.bzl", + "get_compatible_with_portable", "tf_py_strict_test", "tf_python_pybind_extension", ) @@ -20,6 +21,7 @@ package( default_visibility = [ ":internal_visibility_allowlist_package", "//tensorflow:__pkg__", + "//tensorflow/python:__pkg__", ], licenses = ["notice"], ) @@ -45,20 +47,60 @@ package( # ) # copybara:uncomment_end -tf_python_pybind_extension( - name = "pywrap_tensorflow_to_stablehlo", - srcs = ["pywrap_tensorflow_to_stablehlo.cc"], - pytype_srcs = ["pywrap_tensorflow_to_stablehlo.pyi"], +# This is a header-only target. 
The purpose of `pywrap_tensorflow_to_stablehlo_lib_*` targets is to expose only +# the symbols that are required by `pywrap_tensorflow_to_stablehlo`, which translates them to Python functions. +# The only intended use case of this library is by `pywrap_tensorflow_to_stablehlo`. Not letting +# `pywrap_tensorflow_to_stablehlo` directly depend on sub-libraries like `static_range_srq` and instead having +# a consolidated impl library `pywrap_tensorflow_to_stablehlo_lib_impl` allows the maintainers to avoid +# declaring multiple impl libraries to `libtensorflow_cc` and `lib_pywrap_tensorflow_internal`, +# which is required to avoid ODR violations. +cc_library( + name = "pywrap_tensorflow_to_stablehlo_lib_header_only", + srcs = [], + hdrs = ["pywrap_tensorflow_to_stablehlo_lib.h"], + compatible_with = get_compatible_with_portable(), + visibility = ["//visibility:private"], # ONLY for `pywrap_tensorflow_to_stablehlo`. + deps = [ + "@com_google_absl//absl/status:statusor", + "@com_google_absl//absl/strings:string_view", + ], +) + +# See the comments for `pywrap_tensorflow_to_stablehlo_lib_header_only`. +cc_library( + name = "pywrap_tensorflow_to_stablehlo_lib_impl", + srcs = ["pywrap_tensorflow_to_stablehlo_lib.cc"], + hdrs = ["pywrap_tensorflow_to_stablehlo_lib.h"], + compatible_with = get_compatible_with_portable(), + visibility = [ + "//tensorflow:__pkg__", # For libtensorflow_cc.so. + "//tensorflow/python:__pkg__", # For lib_pywrap_tensorflow_internal.so. + ], deps = [ - "//tensorflow/compiler/mlir/quantization/tensorflow/python:type_casters", "//tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo:tf_to_stablehlo", "//tensorflow/compiler/mlir/tensorflow", - "@com_google_absl//absl/strings:str_format", + "//tensorflow/core:lib", + "//third_party/python_runtime:headers", + "@com_google_absl//absl/status", + "@com_google_absl//absl/status:statusor", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:string_view", "@llvm-project//llvm:Support", - "@llvm-project//mlir:CAPIIR", + "@llvm-project//mlir:BytecodeWriter", "@llvm-project//mlir:IR", "@llvm-project//mlir:Parser", "@llvm-project//mlir:Support", + ], +) + +tf_python_pybind_extension( + name = "pywrap_tensorflow_to_stablehlo", + srcs = ["pywrap_tensorflow_to_stablehlo.cc"], + pytype_srcs = ["pywrap_tensorflow_to_stablehlo.pyi"], + # Each dependency MUST be either header-only or exclusive. + deps = [ + ":pywrap_tensorflow_to_stablehlo_lib_header_only", + "//third_party/python_runtime:headers", "@pybind11", "@pybind11_abseil//pybind11_abseil:absl_casters", "@pybind11_abseil//pybind11_abseil:status_casters", diff --git a/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo.cc b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo.cc index 54792f908026bb..1d1f775f5dfda1 100644 --- a/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo.cc +++ b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo.cc @@ -12,159 +12,79 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
==============================================================================*/ +#include -#include "absl/strings/str_format.h" -#include "llvm/Support/ToolOutputFile.h" -#include "mlir/IR/OwningOpRef.h" // from @llvm-project -#include "mlir/Parser/Parser.h" // from @llvm-project -#include "mlir/Support/FileUtilities.h" // from @llvm-project #include "pybind11/pybind11.h" // from @pybind11 #include "pybind11/pytypes.h" // from @pybind11 -#include "pybind11/stl.h" // from @pybind11 // IWYU pragma: keep #include "pybind11_abseil/absl_casters.h" // from @pybind11_abseil // IWYU pragma: keep #include "pybind11_abseil/status_casters.h" // from @pybind11_abseil // IWYU pragma: keep -#include "tensorflow/compiler/mlir/quantization/tensorflow/python/type_casters.h" // IWYU pragma: keep -#include "tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/tf_to_stablehlo.h" -#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h" +#include "tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo_lib.h" namespace py = pybind11; -namespace mlir::pywrap { +namespace { -absl::StatusOr ModuleToBytecode(ModuleOp module) { - std::string bytecode; - llvm::raw_string_ostream os(bytecode); - mlir::BytecodeWriterConfig config; - if (mlir::failed(mlir::writeBytecodeToFile(module, os, config))) { - return absl::InvalidArgumentError("mlir::writeBytecodeToFile failed"); - } - return bytecode; -} - -absl::StatusOr ExportModule(ModuleOp module) { - auto output_filename = absl::StrFormat( - "%s/tf_module.mlir", std::filesystem::temp_directory_path()); - - std::string error_msg; - auto output = openOutputFile(output_filename, &error_msg); - if (output == nullptr) { - return absl::AbortedError( - absl::StrCat("Unable to open output path: ", error_msg)); - } - - std::string result; - llvm::raw_string_ostream os(result); - OpPrintingFlags printing_flags; - module.print(os, printing_flags); - - output->os() << result; - output->keep(); - - return output_filename; -} - -py::bytes PywrapSavedModelToStablehlo( - absl::string_view input_path, - const std::vector& exported_model_signatures = - {"serving_default"}, - const std::vector& tag_names = {"serve"}, - absl::string_view input_arg_shapes_str = "") { - mlir::DialectRegistry registry; - RegisterAllTensorFlowDialects(registry); - mlir::MLIRContext context(registry); - context.loadAllAvailableDialects(); - - auto module = - TfToStablehlo(input_path, &context, exported_model_signatures, tag_names, - input_arg_shapes_str, /*is_input_mlir_module=*/false); - - if (!module.ok()) { - PyErr_SetString(PyExc_ValueError, - "failed to converted TensorFlow to StableHLO"); - return {}; - } - - auto bytecode = ModuleToBytecode(module.value().get()); - if (!bytecode.ok()) { - PyErr_SetString(PyExc_ValueError, "failed to write module to bytecode"); - return {}; - } - - return bytecode.value(); -} - -py::bytes PywrapTfModuleToStablehlo( - absl::string_view module_op_str, - absl::string_view input_arg_shapes_str = "") { - mlir::DialectRegistry registry; - RegisterAllTensorFlowDialects(registry); - mlir::MLIRContext context(registry); - context.loadAllAvailableDialects(); - - auto tf_module = mlir::parseSourceString(module_op_str, &context); - if (!tf_module) { - PyErr_SetString(PyExc_ValueError, "failed to parse TF module string"); - return {}; - } - - auto mlir_file_path = ExportModule(*tf_module); - if (!mlir_file_path.ok()) { - PyErr_SetString(PyExc_ValueError, - "failed to write TF module to a temporary file"); - return {}; - 
} - - auto module = TfToStablehlo( - *mlir_file_path, &context, /*exported_model_signatures=*/{}, - /*tag_names=*/{}, input_arg_shapes_str, /*is_input_mlir_module=*/true); - - if (!module.ok()) { - PyErr_SetString(PyExc_ValueError, - "failed to converted TensorFlow to StableHLO"); - return {}; - } - - auto bytecode = ModuleToBytecode(module.value().get()); - if (!bytecode.ok()) { - PyErr_SetString(PyExc_ValueError, "failed to write module to bytecode"); - return {}; - } - - return bytecode.value(); -} +using mlir::tensorflow_to_stablehlo::pywrap::PywrapSavedModelToStablehlo; +using mlir::tensorflow_to_stablehlo::pywrap::PywrapTfModuleToStablehlo; -} // namespace mlir::pywrap +} // namespace PYBIND11_MODULE(pywrap_tensorflow_to_stablehlo, m) { m.doc() = "TensorFlow to StableHLO APIs."; // LINT.IfChange(savedmodel_to_stablehlo) - m.def("savedmodel_to_stablehlo", &mlir::pywrap::PywrapSavedModelToStablehlo, - R"pbdoc( - This tool converts TensorFlow SavedModel to StableHLO. + m.def( + "savedmodel_to_stablehlo", + [](absl::string_view input_path, + const std::vector& exported_model_signatures = + {"serving_default"}, + const std::vector& tag_names = {"serve"}, + absl::string_view input_arg_shapes_str = "") -> py::bytes { + auto module_bytecode = + PywrapSavedModelToStablehlo(input_path, exported_model_signatures, + tag_names, input_arg_shapes_str); + if (!module_bytecode.ok()) { + PyErr_SetString(PyExc_ValueError, + module_bytecode.status().ToString().c_str()); + throw py::error_already_set(); + } + return py::bytes(module_bytecode.value()); + }, + R"pbdoc( + Converts a TensorFlow SavedModel into StableHLO bytecode. * input-path: The path to the input TensorFlow SavedModel. * exported-model-signatures: Comma-separated list of exported model - signatures to convert. Ignored for MLIR input. - * tag_names: Comma-separated list of tags for loading SavedModel. Ignored for MLIR - input. + signatures to convert. + * tag_names: Comma-separated list of tags for loading SavedModel. * input-arg-shapes: A string representation of input argument shapes for 'main' entry-point, separating tensors with ':', dimension with ',', and using '?' for unknown sizes. For example, 'input-arg-shapes=1,2::1,?' expresses argument shapes [1,2], [] and [1,?]. )pbdoc", - py::arg("input_path"), - py::arg("exported_model_signatures") = - std::vector{"serving_default"}, - py::arg("tag_names") = std::vector{"serve"}, - py::arg("input_arg_shapes_str") = ""); - // LINT.ThenChange(pywrap_tensorflow_to_stablehlo.pyi:tensorflow_to_stablehlo) + py::arg("input_path"), + py::arg("exported_model_signatures") = + std::vector{"serving_default"}, + py::arg("tag_names") = std::vector{"serve"}, + py::arg("input_arg_shapes_str") = ""); + // LINT.ThenChange(pywrap_tensorflow_to_stablehlo.pyi:savedmodel_to_stablehlo) // - // LINT.IfChange(tensorflow_mlir_to_stablehlo) - m.def("tensorflow_module_to_stablehlo", - &mlir::pywrap::PywrapTfModuleToStablehlo, - R"pbdoc( - This tool converts TensorFlow mlir module string to StableHLO. 
+ // LINT.IfChange(tensorflow_module_to_stablehlo) + m.def( + "tensorflow_module_to_stablehlo", + [](absl::string_view module_op_str, + absl::string_view input_arg_shapes_str) -> py::bytes { + auto module_bytecode = + PywrapTfModuleToStablehlo(module_op_str, input_arg_shapes_str); + if (!module_bytecode.ok()) { + PyErr_SetString(PyExc_ValueError, + module_bytecode.status().ToString().c_str()); + throw py::error_already_set(); + } + return py::bytes(module_bytecode.value()); + }, + R"pbdoc( + Converts a TensorFlow MLIR module string into StableHLO bytecode. * module: TensorFlow MLIR module string. * input-arg-shapes: A string representation of input argument shapes for @@ -172,6 +92,6 @@ PYBIND11_MODULE(pywrap_tensorflow_to_stablehlo, m) { using '?' for unknown sizes. For example, 'input-arg-shapes=1,2::1,?' expresses argument shapes [1,2], [] and [1,?]. )pbdoc", - py::arg("module"), py::arg("input_arg_shapes_str") = ""); - // LINT.ThenChange(pywrap_tensorflow_to_stablehlo.pyi:tensorflow_mlir_to_stablehlo) + py::arg("module"), py::arg("input_arg_shapes_str") = ""); + // LINT.ThenChange(pywrap_tensorflow_to_stablehlo.pyi:tensorflow_module_to_stablehlo) } diff --git a/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo_lib.cc b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo_lib.cc new file mode 100644 index 00000000000000..cbd535a861482f --- /dev/null +++ b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo_lib.cc @@ -0,0 +1,141 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo_lib.h" + +#include +#include + +#include "absl/status/status.h" +#include "absl/status/statusor.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "llvm/Support/ToolOutputFile.h" +#include "llvm/Support/raw_ostream.h" +#include "mlir/Bytecode/BytecodeWriter.h" // from @llvm-project +#include "mlir/IR/BuiltinOps.h" // from @llvm-project +#include "mlir/IR/DialectRegistry.h" // from @llvm-project +#include "mlir/IR/MLIRContext.h" // from @llvm-project +#include "mlir/IR/OperationSupport.h" // from @llvm-project +#include "mlir/Parser/Parser.h" // from @llvm-project +#include "mlir/Support/FileUtilities.h" // from @llvm-project +#include "mlir/Support/LogicalResult.h" // from @llvm-project +#include "tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/tf_to_stablehlo.h" +#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h" +#include "tensorflow/core/platform/path.h" + +namespace mlir::tensorflow_to_stablehlo::pywrap { + +absl::StatusOr ModuleToBytecode(ModuleOp module) { + std::string bytecode; + llvm::raw_string_ostream os(bytecode); + mlir::BytecodeWriterConfig config; + if (mlir::failed(mlir::writeBytecodeToFile(module, os, config))) { + return absl::InvalidArgumentError("mlir::writeBytecodeToFile failed"); + } + return bytecode; +} + +absl::StatusOr ExportModule(ModuleOp module) { + const std::string output_filename = tensorflow::io::GetTempFilename(".mlir"); + std::string error_msg; + auto output = openOutputFile(output_filename, &error_msg); + if (output == nullptr) { + return absl::UnknownError( + absl::StrCat("Unable to open output path: ", error_msg)); + } + + std::string result; + llvm::raw_string_ostream os(result); + OpPrintingFlags printing_flags; + module.print(os, printing_flags); + + output->os() << result; + output->keep(); + + return output_filename; +} + +absl::StatusOr PywrapSavedModelToStablehlo( + absl::string_view input_path, + const std::vector& exported_model_signatures, + const std::vector& tag_names, + absl::string_view input_arg_shapes_str) { + mlir::DialectRegistry registry; + RegisterAllTensorFlowDialects(registry); + mlir::MLIRContext context(registry); + context.loadAllAvailableDialects(); + + auto module = + TfToStablehlo(input_path, &context, exported_model_signatures, tag_names, + input_arg_shapes_str, /*is_input_mlir_module=*/false); + + if (!module.ok()) { + return absl::UnknownError( + absl::StrCat("Failed to convert SavedModel to StableHLO: ", + module.status().message())); + } + + auto bytecode = ModuleToBytecode(module.value().get()); + if (!bytecode.ok()) { + return absl::UnknownError( + absl::StrCat("Failed to serialize MLIR module to bytecode: ", + bytecode.status().message())); + } + + return bytecode.value(); +} + +absl::StatusOr PywrapTfModuleToStablehlo( + absl::string_view module_op_str, absl::string_view input_arg_shapes_str) { + mlir::DialectRegistry registry; + RegisterAllTensorFlowDialects(registry); + mlir::MLIRContext context(registry); + context.loadAllAvailableDialects(); + + auto tf_module = mlir::parseSourceString(module_op_str, &context); + if (!tf_module) { + return absl::UnknownError("Failed to parse MLIR module"); + } + + auto mlir_file_path = ExportModule(*tf_module); + if (!mlir_file_path.ok()) { + return absl::UnknownError( + absl::StrCat("Failed to write MLIR module to 
file.", + mlir_file_path.status().message())); + } + + auto module = TfToStablehlo(*mlir_file_path, &context, + /*exported_model_signatures=*/{}, + /*tag_names=*/{}, input_arg_shapes_str, + /*is_input_mlir_module=*/true); + + if (!module.ok()) { + return absl::UnknownError( + absl::StrCat(" Failed to convert SavedModel to StableHLO: ", + module.status().message())); + } + + auto bytecode = ModuleToBytecode(module.value().get()); + if (!bytecode.ok()) { + return absl::UnknownError( + absl::StrCat("Failed to serialize MLIR module to bytecode: ", + bytecode.status().message())); + } + + return bytecode.value(); +} + +} // namespace mlir::tensorflow_to_stablehlo::pywrap diff --git a/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo_lib.h b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo_lib.h new file mode 100644 index 00000000000000..c79ed32b990dd6 --- /dev/null +++ b/tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo_lib.h @@ -0,0 +1,67 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_TO_STABLEHLO_PYTHON_PYWRAP_TENSORFLOW_TO_STABLEHLO_LIB_H_ +#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_TO_STABLEHLO_PYTHON_PYWRAP_TENSORFLOW_TO_STABLEHLO_LIB_H_ + +#include +#include + +#include "absl/status/statusor.h" +#include "absl/strings/string_view.h" + +namespace mlir::tensorflow_to_stablehlo::pywrap { + +// Converts a TensorFlow SavedModel to a StableHLO MLIR module and serializes it +// to bytecode. +// +// Args: +// input_path: The path to the SavedModel directory. +// exported_model_signatures: Comma-separated list of exported model +// signatures to convert. tag_names: Comma-separated list of tags for loading +// SavedModel. +// input_arg_shapes_str: A string representation of input argument +// shapes for 'main' entry-point, separating tensors with ':', dimension +// with ',', and using '?' for unknown sizes. For example, +// 'input-arg-shapes=1,2::1,?' expresses argument shapes [1,2], [] and [1,?]. +// +// Returns: +// An absl::StatusOr containing the serialized bytecode of the StableHLO +// module on success, or an error status on failure. +absl::StatusOr PywrapSavedModelToStablehlo( + absl::string_view input_path, + const std::vector& exported_model_signatures, + const std::vector& tag_names, + absl::string_view input_arg_shapes_str); + +// Converts a TensorFlow MLIR module string to a StableHLO MLIR module and +// serializes it to bytecode. +// +// Args: +// module_op_str: TensorFlow MLIR module string. +// input_arg_shapes_str: A string representation of input argument +// shapes for 'main' entry-point, separating tensors with ':', dimension +// with ',', and using '?' for unknown sizes. For example, +// 'input-arg-shapes=1,2::1,?' 
expresses argument shapes [1,2], [] and [1,?]. +// +// Returns: +// An absl::StatusOr containing the serialized bytecode of the StableHLO +// module on success, or an error status on failure. +absl::StatusOr PywrapTfModuleToStablehlo( + absl::string_view module_op_str, absl::string_view input_arg_shapes_str); + +} // namespace mlir::tensorflow_to_stablehlo::pywrap + +#endif // TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_TO_STABLEHLO_PYTHON_PYWRAP_TENSORFLOW_TO_STABLEHLO_LIB_H_ diff --git a/tensorflow/compiler/mlir/tensorflow/BUILD b/tensorflow/compiler/mlir/tensorflow/BUILD index 0be47767d4cabe..b138c2d3efd598 100644 --- a/tensorflow/compiler/mlir/tensorflow/BUILD +++ b/tensorflow/compiler/mlir/tensorflow/BUILD @@ -676,10 +676,12 @@ cc_library( ":tensorflow", ":tensorflow_op_interfaces", ":tensorflow_side_effects", + ":tensorflow_traits", ":tensorflow_types", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/container:node_hash_map", + "@com_google_absl//absl/log", "@llvm-project//llvm:Support", "@llvm-project//mlir:Analysis", "@llvm-project//mlir:FuncDialect", @@ -1664,7 +1666,6 @@ aliased_targets = [ "export_graphdef", "import_model", "export_tf_dialect_op", - "translate_tf_dialect_op", "mlir_roundtrip_flags", "mlir_import_options", "translate_lib", diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.cc b/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.cc index cab89bb10b5fb9..267bc48d17e06d 100644 --- a/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.cc +++ b/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.cc @@ -20,29 +20,36 @@ limitations under the License. #include #include +#include "absl/log/log.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SCCIterator.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "mlir/Analysis/CallGraph.h" // from @llvm-project #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project -#include "mlir/IR/Attributes.h" // from @llvm-project #include "mlir/IR/Block.h" // from @llvm-project +#include "mlir/IR/BuiltinAttributes.h" // from @llvm-project #include "mlir/IR/BuiltinOps.h" // from @llvm-project -#include "mlir/IR/BuiltinTypes.h" // from @llvm-project +#include "mlir/IR/OpDefinition.h" // from @llvm-project #include "mlir/IR/Operation.h" // from @llvm-project #include "mlir/IR/Value.h" // from @llvm-project +#include "mlir/IR/ValueRange.h" // from @llvm-project #include "mlir/IR/Visitors.h" // from @llvm-project #include "mlir/Interfaces/CallInterfaces.h" // from @llvm-project +#include "mlir/Interfaces/SideEffectInterfaces.h" // from @llvm-project #include "mlir/Support/LLVM.h" // from @llvm-project -#include "mlir/Support/LogicalResult.h" // from @llvm-project +#include "mlir/Support/TypeID.h" // from @llvm-project #include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_op_interfaces.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" +#include "tensorflow/compiler/mlir/tensorflow/ir/tf_traits.h" +#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h" namespace mlir { namespace TF { diff --git 
a/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.h b/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.h index 7afec29bc5df75..c49852c1864763 100644 --- a/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.h +++ b/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.h @@ -21,14 +21,21 @@ limitations under the License. #include #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseSet.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project +#include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "mlir/IR/Operation.h" // from @llvm-project #include "mlir/IR/Region.h" // from @llvm-project +#include "mlir/IR/SymbolTable.h" // from @llvm-project #include "mlir/IR/TypeUtilities.h" // from @llvm-project +#include "mlir/IR/Value.h" // from @llvm-project +#include "mlir/Support/TypeID.h" // from @llvm-project #include "tensorflow/compiler/mlir/tensorflow/analysis/per_function_aggregate_analysis.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h" diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/resource_dataflow.h b/tensorflow/compiler/mlir/tensorflow/analysis/resource_dataflow.h index 0cf3611af1d20c..1e68ac41d25b54 100644 --- a/tensorflow/compiler/mlir/tensorflow/analysis/resource_dataflow.h +++ b/tensorflow/compiler/mlir/tensorflow/analysis/resource_dataflow.h @@ -25,9 +25,11 @@ limitations under the License. #include "llvm/Support/Debug.h" #include "mlir/Analysis/DataFlow/DeadCodeAnalysis.h" // from @llvm-project #include "mlir/Analysis/DataFlow/SparseAnalysis.h" // from @llvm-project +#include "mlir/Analysis/DataFlowFramework.h" // from @llvm-project #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project #include "mlir/IR/Builders.h" // from @llvm-project #include "mlir/IR/BuiltinOps.h" // from @llvm-project +#include "mlir/IR/MLIRContext.h" // from @llvm-project #include "mlir/IR/SymbolTable.h" // from @llvm-project #include "mlir/IR/Value.h" // from @llvm-project #include "mlir/Pass/Pass.h" // from @llvm-project diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.cc b/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.cc index abace5111184ff..372446641382ac 100644 --- a/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.cc +++ b/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.cc @@ -16,9 +16,18 @@ limitations under the License. 
#include +#include "llvm/ADT/STLExtras.h" #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project #include "mlir/IR/BuiltinAttributes.h" // from @llvm-project +#include "mlir/IR/BuiltinOps.h" // from @llvm-project +#include "mlir/IR/BuiltinTypes.h" // from @llvm-project +#include "mlir/IR/Region.h" // from @llvm-project +#include "mlir/IR/SymbolTable.h" // from @llvm-project +#include "mlir/IR/Types.h" // from @llvm-project +#include "mlir/IR/Value.h" // from @llvm-project +#include "mlir/Interfaces/CallInterfaces.h" // from @llvm-project #include "mlir/Support/LLVM.h" // from @llvm-project +#include "mlir/Support/LogicalResult.h" // from @llvm-project #include "mlir/Transforms/RegionUtils.h" // from @llvm-project #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h" diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.h b/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.h index 9817b290c4cbdb..738d8c1df3d395 100644 --- a/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.h +++ b/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.h @@ -22,6 +22,11 @@ limitations under the License. #include "llvm/ADT/StringRef.h" #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project #include "mlir/IR/BuiltinOps.h" // from @llvm-project +#include "mlir/IR/Operation.h" // from @llvm-project +#include "mlir/IR/Region.h" // from @llvm-project +#include "mlir/IR/Value.h" // from @llvm-project +#include "mlir/Support/LLVM.h" // from @llvm-project +#include "mlir/Support/LogicalResult.h" // from @llvm-project #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" namespace mlir { diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc index df0138a20a0c74..179b3979348161 100644 --- a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc +++ b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc @@ -26,25 +26,32 @@ limitations under the License. 
#include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/node_hash_map.h" +#include "absl/log/log.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/STLFunctionalExtras.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project -#include "mlir/IR/Attributes.h" // from @llvm-project #include "mlir/IR/Block.h" // from @llvm-project #include "mlir/IR/Builders.h" // from @llvm-project #include "mlir/IR/BuiltinAttributes.h" // from @llvm-project #include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "mlir/IR/Operation.h" // from @llvm-project +#include "mlir/IR/SymbolTable.h" // from @llvm-project +#include "mlir/IR/TypeUtilities.h" // from @llvm-project #include "mlir/IR/Value.h" // from @llvm-project #include "mlir/IR/Visitors.h" // from @llvm-project +#include "mlir/Interfaces/CallInterfaces.h" // from @llvm-project #include "mlir/Interfaces/SideEffectInterfaces.h" // from @llvm-project #include "mlir/Support/DebugStringHelper.h" // from @llvm-project #include "mlir/Support/LLVM.h" // from @llvm-project +#include "mlir/Support/TypeID.h" // from @llvm-project +#include "tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h" +#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_op_interfaces.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h index 97fcd30d36d02f..feb90de18857b2 100644 --- a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h +++ b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h @@ -23,12 +23,18 @@ limitations under the License. 
#include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/STLFunctionalExtras.h" #include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project +#include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "mlir/IR/Operation.h" // from @llvm-project #include "mlir/IR/Region.h" // from @llvm-project +#include "tensorflow/compiler/mlir/tensorflow/analysis/per_function_aggregate_analysis.h" #include "tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.h" namespace mlir { diff --git a/tensorflow/compiler/mlir/tensorflow/ir/host_runtime/tfrt_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/host_runtime/tfrt_ops.td index e3736e96851262..e46a6500dfd516 100644 --- a/tensorflow/compiler/mlir/tensorflow/ir/host_runtime/tfrt_ops.td +++ b/tensorflow/compiler/mlir/tensorflow/ir/host_runtime/tfrt_ops.td @@ -94,23 +94,17 @@ Empty strings indicate that they are non-partitioned tensors.}]>:$shape_and_slic def TF_IfrtLoadVariableOp : TF_Op<"IfrtLoadVariable", [Pure]> { - let summary = "Loads a restored variable tensor as an IFRT array and tensor future"; + let summary = "Loads a restored variable tensor as a tensor future"; let description = [{ - This op loads a variable tensor as an IFRT array and binds it with the specified name. + This op loads a restored variable tensor as a tensor future. It is a + replacement of `tf.ReadVariableOp`. - This op is an replacement of `tf.ReadVariableOp` in the case that a constant - variable tensor is an input to the tpu program invoked by `tf.IfrtCall`. + This op returns a scalar string tensor containing the restored variable name, which can be + used as a key within the runtime, as well as a future for the tensor. - After a `tf.ReadVariableOp` is lowered into `tf.IfrtLoadVariableOp`, the `tf.IfrtCall` kernel - will bind the loaded IFRT array by name with the tpu program's input. - - `tf.IfrtLoadVariableOp` converts the tensor into an IFRT array based on device and sharding - configuration specified in `VariableDeviceShardingConfigProto`. - - This op returns a scalar string tensor containing the loaded variable name, which can be - used as a key to look for the loaded IFRT array in runtime and a restored tensor, which - maybe lowered to a future by runtime. + The `tf.IfrtCall` kernel uses the output $array_key. + Other ops executed by TFRT may make use of $tensor_future. 
}]; // TODO(b/339423851) Redefine the IfrtLoadVariableOp as it doesn't require the diff --git a/tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/simple_tf_dialect_op.mlir b/tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/simple_tf_dialect_op.mlir deleted file mode 100644 index 780406e0c16127..00000000000000 --- a/tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/simple_tf_dialect_op.mlir +++ /dev/null @@ -1,32 +0,0 @@ -// RUN: tf-mlir-translate -test-only-mlir-to-tf-nodedef %s -o - | FileCheck %s - -func.func @main() { -^bb0: - // CHECK: name: "node_name" - // CHECK-NEXT: op: "Const" - // CHECK-NEXT: attr { - // CHECK: key: "dtype" - // CHECK-NEXT: value { - // CHECK-NEXT: type: DT_INT32 - // CHECK-NEXT: } - // CHECK-NEXT: } - // CHECK-NEXT: attr { - // CHECK-NEXT: key: "value" - // CHECK-NEXT: value { - // CHECK-NEXT: tensor { - // CHECK-NEXT: dtype: DT_INT32 - // CHECK-NEXT: tensor_shape { - // CHECK-NEXT: dim { - // CHECK-NEXT: size: 2 - // CHECK-NEXT: } - // CHECK-NEXT: } - // CHECK-NEXT: tensor_content: "\200\000\000\000\200\000\000\000" - // CHECK: experimental_debug_info { - // CHECK-NEXT: original_node_names: "n1" - // CHECK-NEXT: original_func_names: "f1" - // CHECK-NEXT: } - %0 = "tf.Const"() {value = #tf_type : tensor<2xi32>} : () -> (tensor<2xi32>) loc(fused[callsite("n1@f1" at callsite("node_name" at "file_loc"))]) - func.return -} - - diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_island_coarsening.cc b/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_island_coarsening.cc index 2f456248c381af..b75f081d1a0064 100644 --- a/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_island_coarsening.cc +++ b/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_island_coarsening.cc @@ -395,7 +395,7 @@ bool is_valid_special_tpu_op( bool op_has_inconsistent_cluster_name = wrapped_op_cluster_name.has_value() && - !wrapped_op_cluster_name.value().equals(cluster_name); + wrapped_op_cluster_name.value() != cluster_name; if (op_has_inconsistent_cluster_name) { return false; diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc b/tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc index 2ff6c78896fff2..faedd25114807e 100644 --- a/tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc +++ b/tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc @@ -37,12 +37,13 @@ struct RewriteTPUEmbeddingOps // Rewrites the given op to `OpT` op after adding the given operand at the end. template -OpT AddOperandAndRewriteAs(Operation* op, Value operand, OpBuilder* builder) { +OpT AddOperandAndRewriteAs(Operation* op, Value operand, NamedAttrList attr, + OpBuilder* builder) { builder->setInsertionPoint(op); auto operands = llvm::to_vector<4>(op->getOperands()); operands.push_back(operand); auto new_op = builder->create(op->getLoc(), op->getResultTypes(), - operands, op->getAttrs()); + operands, attr.getAttrs()); op->replaceAllUsesWith(new_op.getOperation()->getResults()); op->erase(); return new_op; @@ -83,8 +84,8 @@ LogicalResult RunOnRegion(Region* region) { // Rewrite RecvTPUEmbeddingActivations op to the corresponding internal op. if (recv_op) - AddOperandAndRewriteAs(recv_op, dedup_op, - &builder); + AddOperandAndRewriteAs( + recv_op, dedup_op, recv_op->getAttrs(), &builder); // Rewrite SendTPUEmbeddingGradients op to the corresponding internal op and // then update the OperandSegmentSize attribute. 
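Editorial aside, not part of the patch: several hunks in this change (executor_tpuv1_island_coarsening.cc above, and tfg-to-tfe.cc and export_utils.cc below) replace llvm::StringRef::equals() with plain operator== / operator!=, matching recent LLVM releases that deprecate and remove StringRef::equals. A minimal sketch of the new comparison style, assuming an LLVM development setup:

#include <cassert>
#include "llvm/ADT/StringRef.h"

int main() {
  llvm::StringRef op_name("tfg.NextIteration");
  // Previously: op_name.equals("tfg.NextIteration")
  assert(op_name == "tfg.NextIteration");  // operator== replaces the old call
  assert(op_name != "tfg.Enter");          // likewise for negated comparisons
  return 0;
}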
@@ -92,11 +93,11 @@ LogicalResult RunOnRegion(Region* region) { int32_t operand_sizes[] = {static_cast(send_op.getN()), static_cast(send_op.getNN()), 1}; auto operand_size_attr = builder.getDenseI32ArrayAttr(operand_sizes); + NamedAttrList attrs(send_op->getAttrs()); + attrs.set(send_op.getOperandSegmentSizeAttr(), operand_size_attr); - auto new_send_op = AddOperandAndRewriteAs( - send_op, dedup_op, &builder); - new_send_op->setAttr(new_send_op.getOperandSegmentSizeAttr(), - operand_size_attr); + AddOperandAndRewriteAs(send_op, dedup_op, + attrs, &builder); } return success(); } diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tfg-to-tfe.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tfg-to-tfe.cc index 8180b4116ef21b..68d50e54a1bce0 100644 --- a/tensorflow/compiler/mlir/tensorflow/transforms/tfg-to-tfe.cc +++ b/tensorflow/compiler/mlir/tensorflow/transforms/tfg-to-tfe.cc @@ -115,7 +115,7 @@ static void FilterOutBlockArgControlDep( static void SplitNextIteration(Block &block) { // TODO(b/207144333): Supports callback for unregistered ops block.walk([&](Operation *op) { - if (!op->getName().getStringRef().equals("tfg.NextIteration")) return; + if (op->getName().getStringRef() != "tfg.NextIteration") return; mlir::OpBuilder builder(op); llvm::SmallVector new_operands; diff --git a/tensorflow/compiler/mlir/tensorflow/translate/BUILD b/tensorflow/compiler/mlir/tensorflow/translate/BUILD index 620e4d7783609e..f0280340dddf62 100644 --- a/tensorflow/compiler/mlir/tensorflow/translate/BUILD +++ b/tensorflow/compiler/mlir/tensorflow/translate/BUILD @@ -112,22 +112,6 @@ cc_library( ], ) -cc_library( - name = "translate_tf_dialect_op", - srcs = ["translate_tf_dialect_op.cc"], - deps = [ - ":export_tf_dialect_op", - "//tensorflow/compiler/mlir/tensorflow", - "@llvm-project//llvm:Support", - "@llvm-project//mlir:FuncDialect", - "@llvm-project//mlir:IR", - "@llvm-project//mlir:Support", - "@llvm-project//mlir:TranslateLib", - "@local_tsl//tsl/platform:protobuf", - ], - alwayslink = 1, -) - cc_library( name = "mlir_roundtrip_flags", srcs = ["mlir_roundtrip_flags.cc"], diff --git a/tensorflow/compiler/mlir/tensorflow/translate/translate_tf_dialect_op.cc b/tensorflow/compiler/mlir/tensorflow/translate/translate_tf_dialect_op.cc deleted file mode 100644 index 856db032e501ae..00000000000000 --- a/tensorflow/compiler/mlir/tensorflow/translate/translate_tf_dialect_op.cc +++ /dev/null @@ -1,75 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "llvm/ADT/STLExtras.h" -#include "llvm/Support/ToolOutputFile.h" -#include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project -#include "mlir/IR/BuiltinOps.h" // from @llvm-project -#include "mlir/IR/Location.h" // from @llvm-project -#include "mlir/IR/MLIRContext.h" // from @llvm-project -#include "mlir/Tools/mlir-translate/Translation.h" // from @llvm-project -#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h" -#include "tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.h" -#include "tsl/platform/protobuf.h" - -namespace mlir { -static mlir::Operation* ExtractOnlyOp(mlir::ModuleOp module) { - mlir::func::FuncOp fn = module.lookupSymbol("main"); - if (!fn) return nullptr; - - if (!llvm::hasSingleElement(fn)) return nullptr; - - // Here, modules with exactly two operations in the only basic block are - // supported. The last operation should be a terminator operation and the - // other operation is the operation of interest. - auto& block = fn.front(); - if (block.getOperations().size() != 2) return nullptr; - if (!block.back().hasTrait()) return nullptr; - - return &block.front(); -} - -static LogicalResult MlirToTfNodeDef(ModuleOp module, - llvm::raw_ostream& output) { - auto* context = module.getContext(); - - Operation* op = ExtractOnlyOp(module); - if (!op) { - emitError(UnknownLoc::get(context), - "modules with exactly one op other than terminator in a " - "'main' function's " - "only block are supported"); - return failure(); - } - - auto node_def_or = tensorflow::ConvertTFDialectOpToNodeDef( - op, "node_name", /*ignore_unregistered_attrs=*/false); - if (!node_def_or.ok()) { - op->emitError("failed to convert to TF NodeDef:") - << node_def_or.status().ToString(); - return failure(); - } - - output << tsl::LegacyUnredactedDebugString(*node_def_or.value()); - return success(); -} - -// Test only translation to convert a simple MLIR module with a single TF -// dialect op to NodeDef. 
-static TranslateFromMLIRRegistration translate_from_mlir_registration( - "test-only-mlir-to-tf-nodedef", "test-only-mlir-to-tf-nodedef", - MlirToTfNodeDef, mlir::RegisterAllTensorFlowDialects); - -} // namespace mlir diff --git a/tensorflow/compiler/mlir/tensorflow/utils/export_utils.cc b/tensorflow/compiler/mlir/tensorflow/utils/export_utils.cc index 7bb35d158f00cd..96ba0afd096a16 100644 --- a/tensorflow/compiler/mlir/tensorflow/utils/export_utils.cc +++ b/tensorflow/compiler/mlir/tensorflow/utils/export_utils.cc @@ -282,12 +282,12 @@ static bool IsRefTypeControlOp(mlir::Operation* op) { if (!op_name_or_status.ok()) return false; auto op_name = std::move(op_name_or_status).value(); - if (op_name.equals("NextIteration")) + if (op_name == "NextIteration") return mlir::isa( mlir::getElementTypeOrSelf(op->getOperand(0).getType())); - if (op_name.equals("Enter") || op_name.equals("Exit") || - op_name.equals("Switch") || op_name.equals("Merge")) { + if (op_name == "Enter" || op_name == "Exit" || op_name == "Switch" || + op_name == "Merge") { return mlir::isa( getElementTypeOrSelf(op->getResult(0).getType())); } diff --git a/tensorflow/compiler/mlir/tf2xla/api/v2/BUILD b/tensorflow/compiler/mlir/tf2xla/api/v2/BUILD index 545203ad20ea23..709a63bea84ebe 100644 --- a/tensorflow/compiler/mlir/tf2xla/api/v2/BUILD +++ b/tensorflow/compiler/mlir/tf2xla/api/v2/BUILD @@ -210,6 +210,7 @@ tf_cc_test( srcs = ["tf_dialect_to_executor_test.cc"], data = [ "testdata/empty_func.mlir", + "testdata/func_with_dead_ops.mlir", "testdata/invalid_executor.mlir", ], deps = [ @@ -220,10 +221,9 @@ tf_cc_test( "@com_google_absl//absl/status", "@com_google_absl//absl/strings", "@com_google_googletest//:gtest_main", + "@llvm-project//llvm:Support", "@llvm-project//mlir:IR", "@llvm-project//mlir:Parser", "@local_tsl//tsl/lib/core:status_test_util", - "@local_tsl//tsl/lib/monitoring:test_utils", - "@local_tsl//tsl/platform:status", ], ) diff --git a/tensorflow/compiler/mlir/tf2xla/api/v2/testdata/func_with_dead_ops.mlir b/tensorflow/compiler/mlir/tf2xla/api/v2/testdata/func_with_dead_ops.mlir new file mode 100644 index 00000000000000..f8dd51f4e12d3c --- /dev/null +++ b/tensorflow/compiler/mlir/tf2xla/api/v2/testdata/func_with_dead_ops.mlir @@ -0,0 +1,62 @@ +module attributes {tf.devices = {"/job:tpu_host_worker/replica:0/task:0/device:CPU:0", "/job:tpu_host_worker/replica:0/task:0/device:TPU:0", "/job:tpu_host_worker/replica:0/task:0/device:TPU:1", "/job:tpu_host_worker/replica:0/task:0/device:TPU_SYSTEM:0", "/job:tpu_host_worker/replica:0/task:1/device:CPU:0", "/job:tpu_host_worker/replica:0/task:1/device:TPU:0", "/job:tpu_host_worker/replica:0/task:1/device:TPU:1", "/job:tpu_host_worker/replica:0/task:1/device:TPU_SYSTEM:0", "/job:tpu_host_worker/replica:0/task:2/device:CPU:0", "/job:tpu_host_worker/replica:0/task:2/device:TPU:0", "/job:tpu_host_worker/replica:0/task:2/device:TPU:1", "/job:tpu_host_worker/replica:0/task:2/device:TPU_SYSTEM:0", "/job:tpu_host_worker/replica:0/task:3/device:CPU:0", "/job:tpu_host_worker/replica:0/task:3/device:TPU:0", "/job:tpu_host_worker/replica:0/task:3/device:TPU:1", "/job:tpu_host_worker/replica:0/task:3/device:TPU_SYSTEM:0"}, tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1847 : i32}} { + func.func @main(%arg0: tensor {tf._user_specified_name = "steps", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg1: tensor<*x!tf_type.resource>> {tf._user_specified_name = "899", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg2: 
tensor<*x!tf_type.resource>> {tf._user_specified_name = "901", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg3: tensor<*x!tf_type.resource>> {tf._user_specified_name = "903", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg4: tensor<*x!tf_type.resource>> {tf._user_specified_name = "905", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg5: tensor<*x!tf_type.resource>> {tf._user_specified_name = "907", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg6: tensor<*x!tf_type.resource>> {tf._user_specified_name = "909", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg7: tensor<*x!tf_type.resource>> {tf._user_specified_name = "911", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg8: tensor<*x!tf_type.resource>> {tf._user_specified_name = "913", tf.device = "/job:tpu_host_worker/replica:0/task:1/device:CPU:0"}, %arg9: tensor<*x!tf_type.resource>> {tf._user_specified_name = "915", tf.device = "/job:tpu_host_worker/replica:0/task:2/device:CPU:0"}, %arg10: tensor<*x!tf_type.resource>> {tf._user_specified_name = "917", tf.device = "/job:tpu_host_worker/replica:0/task:3/device:CPU:0"}, %arg11: tensor<*x!tf_type.resource>> {tf._user_specified_name = "919", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg12: tensor<*x!tf_type.resource>> {tf._user_specified_name = "921", tf.device = "/job:tpu_host_worker/replica:0/task:1/device:CPU:0"}, %arg13: tensor<*x!tf_type.resource>> {tf._user_specified_name = "923", tf.device = "/job:tpu_host_worker/replica:0/task:2/device:CPU:0"}, %arg14: tensor<*x!tf_type.resource>> {tf._user_specified_name = "925", tf.device = "/job:tpu_host_worker/replica:0/task:3/device:CPU:0"}, %arg15: tensor<*x!tf_type.resource>> {tf._user_specified_name = "927", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg16: tensor<*x!tf_type.resource>> {tf._user_specified_name = "929", tf.device = "/job:tpu_host_worker/replica:0/task:1/device:CPU:0"}, %arg17: tensor<*x!tf_type.resource>> {tf._user_specified_name = "931", tf.device = "/job:tpu_host_worker/replica:0/task:2/device:CPU:0"}, %arg18: tensor<*x!tf_type.resource>> {tf._user_specified_name = "933", tf.device = "/job:tpu_host_worker/replica:0/task:3/device:CPU:0"}, %arg19: tensor<*x!tf_type.resource>> {tf._user_specified_name = "935", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg20: tensor<*x!tf_type.resource>> {tf._user_specified_name = "937", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}, %arg21: tensor<*x!tf_type.resource>> {tf._user_specified_name = "939", tf.device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0"}) -> tensor attributes {allow_soft_placement = false, tf.entry_function = {control_outputs = "", inputs = "steps,unknown,unknown_0,unknown_1,unknown_2,unknown_3,unknown_4,unknown_5,unknown_6,unknown_7,unknown_8,unknown_9,unknown_10,unknown_11,unknown_12,unknown_13,unknown_14,unknown_15,unknown_16,unknown_17,unknown_18,unknown_19", outputs = "statefulpartitionedcall_RetVal"}} { + %0 = "tf.ReadVariableOp"(%arg19) : (tensor<*x!tf_type.resource>>) -> tensor<128x1024xf32> + %1 = "tf.ReadVariableOp"(%arg1) : (tensor<*x!tf_type.resource>>) -> tensor + %2 = "tf.ReadVariableOp"(%arg2) : (tensor<*x!tf_type.resource>>) -> tensor + %3 = "tf.ReadVariableOp"(%arg4) : (tensor<*x!tf_type.resource>>) -> tensor<1024xf32> + %4 = "tf.ReadVariableOp"(%arg3) : (tensor<*x!tf_type.resource>>) -> tensor<128x1024xf32> + %5 = 
"tf.ReadVariableOp"(%arg5) : (tensor<*x!tf_type.resource>>) -> tensor<1024x1xf32> + %6 = "tf.ReadVariableOp"(%arg20) : (tensor<*x!tf_type.resource>>) -> tensor<1024xf32> + %7 = "tf.ReadVariableOp"(%arg21) : (tensor<*x!tf_type.resource>>) -> tensor<1024x1xf32> + %8 = "tf.ReadVariableOp"(%arg6) : (tensor<*x!tf_type.resource>>) -> tensor + %9 = "tf.Const"() <{value = dense<"test"> : tensor<3x!tf_type.string>}> : () -> tensor<3x!tf_type.string> + %cst = "tf.Const"() <{value = dense<0> : tensor}> : () -> tensor + %11:4 = "tf.Split"(%cst, %0) {num_split = 4 : i32} : (tensor, tensor<128x1024xf32>) -> (tensor<32x1024xf32>, tensor<32x1024xf32>, tensor<32x1024xf32>, tensor<32x1024xf32>) + %cst_0 = "tf.Const"() <{value = dense<0> : tensor}> : () -> tensor + %12:4 = "tf.Split"(%cst_0, %4) {num_split = 4 : i32} : (tensor, tensor<128x1024xf32>) -> (tensor<32x1024xf32>, tensor<32x1024xf32>, tensor<32x1024xf32>, tensor<32x1024xf32>) + %cst_1 = "tf.Const"() <{value = dense<0> : tensor}> : () -> tensor + %cst_2 = "tf.Const"() <{value = dense<0> : tensor}> : () -> tensor + %13:20 = tf_device.replicate {devices = {TPU_REPLICATED_CORE_0 = ["/job:tpu_host_worker/replica:0/task:0/device:TPU:0", "/job:tpu_host_worker/replica:0/task:2/device:TPU:0"], TPU_REPLICATED_CORE_1 = ["/job:tpu_host_worker/replica:0/task:0/device:TPU:1", "/job:tpu_host_worker/replica:0/task:2/device:TPU:1"], TPU_REPLICATED_CORE_2 = ["/job:tpu_host_worker/replica:0/task:1/device:TPU:0", "/job:tpu_host_worker/replica:0/task:3/device:TPU:0"], TPU_REPLICATED_CORE_3 = ["/job:tpu_host_worker/replica:0/task:1/device:TPU:1", "/job:tpu_host_worker/replica:0/task:3/device:TPU:1"], TPU_REPLICATED_HOST_0 = ["/job:tpu_host_worker/replica:0/task:0/device:CPU:0", "/job:tpu_host_worker/replica:0/task:2/device:CPU:0"], TPU_REPLICATED_HOST_1 = ["/job:tpu_host_worker/replica:0/task:0/device:CPU:0", "/job:tpu_host_worker/replica:0/task:2/device:CPU:0"], TPU_REPLICATED_HOST_2 = ["/job:tpu_host_worker/replica:0/task:1/device:CPU:0", "/job:tpu_host_worker/replica:0/task:3/device:CPU:0"], TPU_REPLICATED_HOST_3 = ["/job:tpu_host_worker/replica:0/task:1/device:CPU:0", "/job:tpu_host_worker/replica:0/task:3/device:CPU:0"]}, n = 2 : i32} { + %16:40 = "tf_device.parallel_execute"() ({ + %19:10 = "tf_device.launch"() <{device = "TPU_REPLICATED_CORE_0"}> ({ + %20:10 = "tf.TPUExecute"(%arg0, %11#0, %1, %2, %3, %12#0, %5, %6, %7, %8, %9) : (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor, tensor<3x!tf_type.string>) -> (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor) + tf_device.return %20#0, %20#1, %20#2, %20#3, %20#4, %20#5, %20#6, %20#7, %20#8, %20#9 : tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor + }) : () -> (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor) + tf_device.return %19#0, %19#1, %19#2, %19#3, %19#4, %19#5, %19#6, %19#7, %19#8, %19#9 : tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor + }, { + %19:10 = "tf_device.launch"() <{device = "TPU_REPLICATED_CORE_1"}> ({ + %20:10 = "tf.TPUExecute"(%arg0, %11#1, %1, %2, %3, %12#1, %5, %6, %7, %8, %9) : (tensor, 
tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor, tensor<3x!tf_type.string>) -> (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor) + tf_device.return %20#0, %20#1, %20#2, %20#3, %20#4, %20#5, %20#6, %20#7, %20#8, %20#9 : tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor + }) : () -> (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor) + tf_device.return %19#0, %19#1, %19#2, %19#3, %19#4, %19#5, %19#6, %19#7, %19#8, %19#9 : tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor + }, { + %19:10 = "tf_device.launch"() <{device = "TPU_REPLICATED_CORE_2"}> ({ + %20:10 = "tf.TPUExecute"(%arg0, %11#2, %1, %2, %3, %12#2, %5, %6, %7, %8, %9) : (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor, tensor<3x!tf_type.string>) -> (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor) + tf_device.return %20#0, %20#1, %20#2, %20#3, %20#4, %20#5, %20#6, %20#7, %20#8, %20#9 : tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor + }) : () -> (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor) + tf_device.return %19#0, %19#1, %19#2, %19#3, %19#4, %19#5, %19#6, %19#7, %19#8, %19#9 : tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor + }, { + %19:10 = "tf_device.launch"() <{device = "TPU_REPLICATED_CORE_3"}> ({ + %20:10 = "tf.TPUExecute"(%arg0, %11#3, %1, %2, %3, %12#3, %5, %6, %7, %8, %9) : (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor, tensor<3x!tf_type.string>) -> (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor) + tf_device.return %20#0, %20#1, %20#2, %20#3, %20#4, %20#5, %20#6, %20#7, %20#8, %20#9 : tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor + }) : () -> (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor) + tf_device.return %19#0, %19#1, %19#2, %19#3, %19#4, %19#5, %19#6, %19#7, %19#8, %19#9 : tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor + }) : () -> (tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor, tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor, 
tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor, tensor, tensor<32x1024xf32>, tensor, tensor, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor) + %17 = "tf.Concat"(%cst_1, %16#5, %16#15, %16#25, %16#35) : (tensor, tensor<32x1024xf32>, tensor<32x1024xf32>, tensor<32x1024xf32>, tensor<32x1024xf32>) -> tensor<128x1024xf32> + %18 = "tf.Concat"(%cst_2, %16#1, %16#11, %16#21, %16#31) : (tensor, tensor<32x1024xf32>, tensor<32x1024xf32>, tensor<32x1024xf32>, tensor<32x1024xf32>) -> tensor<128x1024xf32> + tf_device.return %16#0, %16#9, %16#8, %16#7, %16#6, %17, %16#4, %16#3, %16#2, %18 : tensor, tensor, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor<128x1024xf32>, tensor<1024xf32>, tensor, tensor, tensor<128x1024xf32> + } + "tf.AssignVariableOp"(%arg19, %13#18) <{validate_shape = false}> : (tensor<*x!tf_type.resource>>, tensor<128x1024xf32>) -> () + "tf.AssignVariableOp"(%arg1, %13#16) <{validate_shape = false}> : (tensor<*x!tf_type.resource>>, tensor) -> () + "tf.AssignVariableOp"(%arg2, %13#14) <{validate_shape = false}> : (tensor<*x!tf_type.resource>>, tensor) -> () + "tf.AssignVariableOp"(%arg4, %13#12) <{validate_shape = false}> : (tensor<*x!tf_type.resource>>, tensor<1024xf32>) -> () + "tf.AssignVariableOp"(%arg3, %13#10) <{validate_shape = false}> : (tensor<*x!tf_type.resource>>, tensor<128x1024xf32>) -> () + "tf.AssignVariableOp"(%arg5, %13#8) <{validate_shape = false}> : (tensor<*x!tf_type.resource>>, tensor<1024x1xf32>) -> () + "tf.AssignVariableOp"(%arg20, %13#6) <{validate_shape = false}> : (tensor<*x!tf_type.resource>>, tensor<1024xf32>) -> () + "tf.AssignVariableOp"(%arg21, %13#4) <{validate_shape = false}> : (tensor<*x!tf_type.resource>>, tensor<1024x1xf32>) -> () + "tf.AssignVariableOp"(%arg6, %13#2) <{validate_shape = true}> {_has_manual_control_dependencies = true} : (tensor<*x!tf_type.resource>>, tensor) -> () + %14 = "tf.ReadVariableOp"(%arg2) {device = ""} : (tensor<*x!tf_type.resource>>) -> tensor + %15 = "tf.Identity"(%14) {device = ""} : (tensor) -> tensor + return %15 : tensor + } +} diff --git a/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.cc b/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.cc index c92fd85d3567b4..cd13e869e811dd 100644 --- a/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.cc +++ b/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.cc @@ -88,6 +88,8 @@ void AddTfDialectToExecutorPasses(OpPassManager &pm) { pm.addNestedPass(mlir::TFTPU::CreateTPUDevicePropagationPass()); pm.addNestedPass(mlir::TFTPU::CreateTPUColocateSplitsPass()); pm.addPass(mlir::createSymbolDCEPass()); + pm.addNestedPass( + mlir::tf_executor::CreateTFExecutorGraphPruningPass()); if (tensorflow::GetMlirCommonFlags() ->tf_mlir_enable_convert_control_to_data_outputs_pass) { bool composite_tpuexecute_side_effects = diff --git a/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor_test.cc b/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor_test.cc index 0c64dd3dcbe1a3..897c800d9e4cd7 100644 --- a/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor_test.cc +++ b/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor_test.cc @@ -15,12 +15,16 @@ limitations under the License. 
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.h" +#include + #include #include #include #include "absl/status/status.h" #include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "llvm/Support/raw_ostream.h" #include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "mlir/IR/DialectRegistry.h" // from @llvm-project #include "mlir/IR/MLIRContext.h" // from @llvm-project @@ -30,7 +34,6 @@ limitations under the License. #include "tensorflow/core/lib/monitoring/cell_reader.h" #include "tensorflow/core/platform/resource_loader.h" #include "tsl/lib/core/status_test_util.h" -#include "tsl/platform/status.h" namespace tensorflow { namespace tf2xla { @@ -53,6 +56,16 @@ std::string TestDataPath() { "tensorflow/compiler/mlir/tf2xla/api/v2/testdata/"); } +size_t CountSubstring(absl::string_view str, absl::string_view substr) { + size_t count = 0; + size_t idx = str.find(substr); + while (idx != std::string::npos) { + count++; + idx = str.find(substr, idx + 1); + } + return count; +} + class TensorflowDialectToExecutorTest : public ::testing::Test { public: TensorflowDialectToExecutorTest() { @@ -100,6 +113,23 @@ TEST_F(TensorflowDialectToExecutorTest, ErrorsWhenCannotConvert) { EXPECT_EQ(compilation_status.Delta(kExportFailed), 1); } +TEST_F(TensorflowDialectToExecutorTest, PrunesDeadOps) { + CellReader compilation_status(kExportStreamzName); + + TF_ASSERT_OK(CreateMlirModule("func_with_dead_ops.mlir")); + + TF_EXPECT_OK(ExportFromTensorflowDialectToExecutor(*mlir_module_)); + + std::string module_dump; + llvm::raw_string_ostream raw_stream(module_dump); + mlir_module_->print(raw_stream); + + EXPECT_EQ(compilation_status.Delta(kExportSuccess), 1); + EXPECT_EQ(compilation_status.Delta(kExportFailed), 0); + EXPECT_EQ( + CountSubstring(module_dump, "tf_executor.island wraps \"tf.Concat\""), 2); +} + } // namespace } // namespace v2 } // namespace tf2xla diff --git a/tensorflow/compiler/mlir/tfrt/ir/mlrt/BUILD b/tensorflow/compiler/mlir/tfrt/ir/mlrt/BUILD index 5bc69ce7a70758..374aad2a242d9b 100644 --- a/tensorflow/compiler/mlir/tfrt/ir/mlrt/BUILD +++ b/tensorflow/compiler/mlir/tfrt/ir/mlrt/BUILD @@ -14,7 +14,6 @@ td_library( includes = ["."], visibility = [ # copybara:uncomment "//learning/brain/tfrt/mlir:__subpackages__", - "//learning/infra/mira/distributed:__subpackages__", ], deps = [ "@llvm-project//mlir:OpBaseTdFiles", @@ -51,7 +50,6 @@ cc_library( ], visibility = [ # copybara:uncomment "//learning/brain/tfrt/mlir:__subpackages__", - "//learning/infra/mira/distributed:__subpackages__", "//tensorflow/compiler/mlir/tfrt:__subpackages__", ], deps = [ @@ -71,9 +69,6 @@ td_library( "tf_ops.td", ], includes = ["."], - visibility = [ - # copybara:uncomment "//learning/infra/mira/distributed:__subpackages__", - ], deps = [ ":mlrt_td_files", "//tensorflow/compiler/mlir/tensorflow:tensorflow_ops_td_files", @@ -156,7 +151,6 @@ cc_library( hdrs = ["tf_mlrt_ops.h"], visibility = [ # copybara:uncomment "//learning/brain/experimental/tfrt/mlrt/application/tensorflow/tests:__subpackages__", - # copybara:uncomment "//learning/infra/mira/distributed:__subpackages__", "//tensorflow/compiler/mlir/tfrt:__subpackages__", ], deps = [ diff --git a/tensorflow/compiler/mlir/tfrt/ir/mlrt/tf_mlrt_ops.td b/tensorflow/compiler/mlir/tfrt/ir/mlrt/tf_mlrt_ops.td index ae4fa4be4f78c9..0659143f49b39b 100644 --- a/tensorflow/compiler/mlir/tfrt/ir/mlrt/tf_mlrt_ops.td +++ b/tensorflow/compiler/mlir/tfrt/ir/mlrt/tf_mlrt_ops.td @@ -449,21 +449,14 @@ def IfrtLoadVariableOp: 
TensorflowMlrt_Op<"ifrt_load_variable", [Pure]> { let summary = "Loads a variable tensor as an IFRT array for mlrt"; let description = [{ - This is the MLRT version of tf.IfrtLoadVariableOp. + This op loads a restored variable tensor as a tensor future. It is a + replacement of `tf.ReadVariableOp`. - This op loads a variable tensor as an IFRT array and binds it with the specified name. + This op returns a scalar string tensor containing the restored variable name, which can be + used as a key within the runtime, as well as a future for the tensor. - This op is an replacement of `tf.ReadVariableOp` in the case that a constant - variable tensor is an input to the tpu program invoked by `tf.IfrtCall`. - - After a `tf.ReadVariableOp` is lowered into `tf.IfrtLoadVariableOp`, the `tf.IfrtCall` kernel - will bind the loaded IFRT array by name with the tpu program's input. - - `tf.IfrtLoadVariableOp` converts the tensor into an IFRT array based on device and sharding - configuration specified in `VariableDeviceShardingConfigProto`. - - This op returns a scalar string tensor as a key for user to look for the loaded array - and a future containing the restored tensor. + The `tf.IfrtCall` kernel uses the output $array_key. + Other ops executed by TFRT may make use of $tensor_future. }]; let arguments = (ins diff --git a/tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_sync.cc b/tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_sync.cc index 8083fcac076745..6a429ef275e869 100644 --- a/tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_sync.cc +++ b/tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_sync.cc @@ -30,6 +30,7 @@ limitations under the License. #include "tfrt/core_runtime/opdefs/attributes.h" // from @tf_runtime #include "tfrt/core_runtime/opdefs/core_runtime.h" // from @tf_runtime #include "tfrt/core_runtime/opdefs/types.h" // from @tf_runtime +#include "tfrt/tensor/opdefs/tensor.h" // from @tf_runtime namespace tfrt { namespace fallback_sync { @@ -50,7 +51,7 @@ FallbackSyncDialect::FallbackSyncDialect(MLIRContext *context) } static Type GetTensorType(Builder *builder) { - return tfrt::t::TensorType::get(builder->getContext()); + return tfrt::tfrt_tensor::TensorType::get(builder->getContext()); } } // namespace fallback_sync diff --git a/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/const_tensor.mlir b/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/const_tensor.mlir index b208fe390acc3f..6596d650889384 100644 --- a/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/const_tensor.mlir +++ b/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/const_tensor.mlir @@ -9,12 +9,12 @@ func.func @string_tensor() -> (tensor<0x!tf_type.string>, tensor<7x!tf_type.stri func.return %0, %1 : tensor<0x!tf_type.string>, tensor<7x!tf_type.string> } -// Convert tf.Const to corert.const_dense_tensor only on cpu device +// Convert tf.Const to tfrt_fallback_async.const_dense_tensor only on cpu device // CHECK-LABEL: func @dense_tensor func.func @dense_tensor() -> tensor<4xui64> { - // CHECK: corert.const_dense_tensor dense<[1, 2, 3, 4]> : tensor<4xui64> + // CHECK: tfrt_fallback_async.const_dense_tensor dense<[1, 2, 3, 4]> : tensor<4xui64> %0 = "tf.Const"() {value = dense<[1, 2, 3, 4]> : tensor<4xui64>} : () -> tensor<4xui64> - // CHECK: corert.const_dense_tensor dense<1.000000e+00> : tensor<1xbf16> + // CHECK: tfrt_fallback_async.const_dense_tensor dense<1.000000e+00> : tensor<1xbf16> %1 = "tf.Const"() {device = "/device:CPU:0", value = dense<[1.0]> : tensor<1xbf16>} : () -> tensor<4xbf16> // CHECK: corert.executeop({{.*}}) 
"tf.Const"() {dtype = ui64, value = dense<[1, 2, 3, 4]> : tensor<4xui64>} : 1 %2 = "tf.Const"() {device = "/device:GPU:0", value = dense<[1, 2, 3, 4]> : tensor<4xui64>} : () -> tensor<4xui64> diff --git a/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/control_flow.mlir b/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/control_flow.mlir index e3562b286c47f3..ad3232042ca5e7 100644 --- a/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/control_flow.mlir +++ b/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/control_flow.mlir @@ -56,10 +56,8 @@ func.func @while_body_add2(%arg0: tensor) -> tensor { // CHECK-LABEL: func @while_test // CHECK-SAME: ([[ARG0:%.+]]: !tfrt.chain) -> (!tfrt.chain, !tfrt_fallback.tf_tensor) func.func @while_test() -> (tensor) { - // CHECK: [[CONST_TH:%.*]] = corert.const_dense_tensor dense<0> : tensor + // CHECK: [[CONST:%.*]] = tfrt_fallback_async.const_dense_tensor dense<0> : tensor %0 = "tf.Const"() {device = "/device:CPU:0", value = dense<0> : tensor} : () -> tensor - // CHECK: [[CONST:%.*]] = tfrt_fallback_async.corert_tensorhandle_to_fallback_tensor [[CONST_TH]] - // CHECK: (!corert.tensorhandle) -> (!tfrt_fallback.tf_tensor) // CHECK: [[pred_res:%.*]]:2 = tfrt.call @"while_cond_lt9/tfrt_predicate"([[ARG0]], [[CONST]]) : (!tfrt.chain, !tfrt_fallback.tf_tensor) -> (!tfrt.chain, i1) // CHECK: [[while_res:%.]]:2 = tfrt.while [[pred_res]]#1 @"while_body_add2/tfrt_body_1"([[pred_res]]#0, [[CONST]]) // CHECK-SAME: (!tfrt.chain, !tfrt_fallback.tf_tensor) -> (!tfrt.chain, !tfrt_fallback.tf_tensor) diff --git a/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/decompose_resource_op.mlir b/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/decompose_resource_op.mlir index ed308d02ad6e0b..ff0f0e7dbfd2cd 100644 --- a/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/decompose_resource_op.mlir +++ b/tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/decompose_resource_op.mlir @@ -5,8 +5,7 @@ module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, p // CHECK-LABEL: func @gather // CHECK-SAME: ([[in_chain:%.*]]: !tfrt.chain // CHECK-SAME: [[arg0:%.*]]: !tfrt_fallback.tf_tensor, [[arg1:%.*]]: !tfrt_fallback.tf_tensor) -// CHECK: [[const_th:%.*]] = corert.const_dense_tensor -// CHECK-NEXT: [[const:%.*]] = tfrt_fallback_async.corert_tensorhandle_to_fallback_tensor [[const_th]] {device = "/job:localhost/replica:0/task:0/device:CPU:0"} +// CHECK: [[const:%.*]] = tfrt_fallback_async.const_dense_tensor // CHECK-NEXT: [[out_chain:%.*]], [[value:%.*]] = tfrt_fallback_async.executeop.seq([[in_chain]]) key(0) cost({{.*}}) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.ReadVariableOp"({{.*}}) // CHECK-NEXT: [[res:%.*]] = tfrt_fallback_async.executeop key(1) cost({{.*}}) device("/job:localhost/replica:0/task:0/device:CPU:0") "tf.GatherV2"([[value]], {{.*}}, [[const]]) // CHECK-NEXT: tfrt.return [[out_chain]], [[res]] : !tfrt.chain, !tfrt_fallback.tf_tensor diff --git a/tensorflow/compiler/mlir/tfrt/transforms/mlrt/BUILD b/tensorflow/compiler/mlir/tfrt/transforms/mlrt/BUILD index d7fafb49ee6cdd..ed518285828d1a 100644 --- a/tensorflow/compiler/mlir/tfrt/transforms/mlrt/BUILD +++ b/tensorflow/compiler/mlir/tfrt/transforms/mlrt/BUILD @@ -1,7 +1,6 @@ package( # copybara:uncomment default_applicable_licenses = ["//tensorflow:license"], default_visibility = [ - # copybara:uncomment "//learning/infra/mira/distributed:__subpackages__", "//tensorflow/compiler/mlir/tfrt:__subpackages__", "//tensorflow/core/tfrt:__subpackages__", ], diff --git 
a/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt.cc b/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt.cc index 024baa43c73132..f090745e0ae1c4 100644 --- a/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt.cc +++ b/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt.cc @@ -738,11 +738,11 @@ class FallbackBatchFunctionOpConversion // Lower a tf.Const op that creates a string tensor to a native // corert.create_string_tensor op. -class CoreRTConstDenseTensorOpConversion +class FallbackConstDenseTensorOpConversion : public mlir::OpConversionPattern { public: - CoreRTConstDenseTensorOpConversion(mlir::MLIRContext *context, - CoreRTConverter *corert_converter) + FallbackConstDenseTensorOpConversion(mlir::MLIRContext *context, + CoreRTConverter *corert_converter) : mlir::OpConversionPattern(context, kCoreRTBenefit), corert_converter_(*corert_converter) {} @@ -756,8 +756,8 @@ class CoreRTConstDenseTensorOpConversion if (auto parsed_device_name = corert_converter_.ParseDeviceName(op)) if (parsed_device_name->device_type != DEVICE_CPU) return failure(); - auto new_op = rewriter.create( - op.getLoc(), corert_converter_.tensor_handle_type(), + auto new_op = rewriter.create( + op.getLoc(), rewriter.getType(), mlir::cast(op.getValue())); rewriter.replaceOp(op, new_op->getResult(0)); return success(); @@ -860,11 +860,11 @@ class TFRTFuncOpSignatureConversion // Lower a tf.Const op that creates a string tensor to a native // corert.create_string_tensor op. -class CoreRTConstStringTensorOpConversion +class FallbackConstStringTensorOpConversion : public mlir::OpConversionPattern { public: - CoreRTConstStringTensorOpConversion(mlir::MLIRContext *context, - CoreRTConverter *corert_converter) + FallbackConstStringTensorOpConversion(mlir::MLIRContext *context, + CoreRTConverter *corert_converter) : mlir::OpConversionPattern(context, kCoreRTBenefit), corert_converter_(*corert_converter) {} @@ -890,8 +890,8 @@ class CoreRTConstStringTensorOpConversion for (auto dim : shape) dims.push_back(rewriter.getIntegerAttr(i64_type, dim)); - auto new_op = rewriter.create( - op.getLoc(), corert_converter_.tensor_handle_type(), + auto new_op = rewriter.create( + op.getLoc(), rewriter.getType(), rewriter.getArrayAttr(dims), rewriter.getArrayAttr(values)); rewriter.replaceOp(op, new_op.getResult()); @@ -1532,8 +1532,9 @@ void PopulateTFToTFRTConversionPatterns( // Here we use specialized patterns for tf.Const on CPU as it is incorrect to // use ExecuteOp pattern to convert string tensor attribute. - patterns->add(context, corert_converter); + patterns->add(context, + corert_converter); } // Lower TF dialect MLIR to TFRT dialect. 
diff --git a/tensorflow/compiler/tests/BUILD b/tensorflow/compiler/tests/BUILD index d255b67ccff83f..956296e561f205 100644 --- a/tensorflow/compiler/tests/BUILD +++ b/tensorflow/compiler/tests/BUILD @@ -2082,7 +2082,7 @@ cuda_py_strict_test( cuda_py_strict_test( name = "dense_layer_test", - size = "medium", + size = "large", srcs = ["dense_layer_test.py"], tags = [ "no_pip", # TODO(b/149738646): fix pip install so these tests run on kokoro pip diff --git a/tensorflow/core/common_runtime/BUILD b/tensorflow/core/common_runtime/BUILD index 5467fadecaeb28..0ee8c2658e9a1a 100644 --- a/tensorflow/core/common_runtime/BUILD +++ b/tensorflow/core/common_runtime/BUILD @@ -683,6 +683,7 @@ cc_library( "@com_google_absl//absl/strings", "@com_google_absl//absl/time", "@com_google_absl//absl/types:optional", + "@local_tsl//tsl/platform:env", ], alwayslink = 1, ) @@ -936,8 +937,9 @@ cc_library( ":function_body", ":graph_constructor", "//tensorflow/core:framework", - "//tensorflow/core:graph", - "//tensorflow/core:protos_all_cc", + "//tensorflow/core/lib/core:status", + "//tensorflow/core/platform:refcount", + "@local_tsl//tsl/platform:errors", ], ) @@ -1267,8 +1269,10 @@ cc_library( "//tensorflow/core:framework", "//tensorflow/core:graph", "//tensorflow/core:lib", + "//tensorflow/core/config:flag_defs", "//tensorflow/core/platform:refcount", "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/types:span", ], ) @@ -1584,6 +1588,7 @@ cc_library( "//tensorflow/core:framework", "//tensorflow/core:lib", "//tensorflow/core:lib_internal", + "@local_tsl//tsl/platform:env", ], ) @@ -3144,6 +3149,7 @@ tf_cc_test( "//tensorflow/core:test", "//tensorflow/core:test_main", "//tensorflow/core:testlib", + "//tensorflow/core/config:flag_defs", ], ) @@ -3209,6 +3215,7 @@ tf_cc_test( "//tensorflow/cc:client_session", "//tensorflow/cc:function_ops", "//tensorflow/cc:ops", + "//tensorflow/cc:scope", "//tensorflow/core:all_kernels", "//tensorflow/core:framework", "//tensorflow/core:framework_internal", @@ -3217,6 +3224,7 @@ tf_cc_test( "//tensorflow/core:test", "//tensorflow/core:test_main", "//tensorflow/core:testlib", + "//tensorflow/core/config:flag_defs", "@com_google_absl//absl/strings", "@com_google_googletest//:gtest", ], diff --git a/tensorflow/core/common_runtime/base_collective_executor.cc b/tensorflow/core/common_runtime/base_collective_executor.cc index cb759b4aa61973..9e020823c8a836 100644 --- a/tensorflow/core/common_runtime/base_collective_executor.cc +++ b/tensorflow/core/common_runtime/base_collective_executor.cc @@ -35,7 +35,6 @@ limitations under the License. #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/refcount.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/profiler/lib/connected_traceme.h" #include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h" diff --git a/tensorflow/core/common_runtime/device/device_event_mgr.cc b/tensorflow/core/common_runtime/device/device_event_mgr.cc index 1941cea9bc74f4..e786e5c9915bc9 100644 --- a/tensorflow/core/common_runtime/device/device_event_mgr.cc +++ b/tensorflow/core/common_runtime/device/device_event_mgr.cc @@ -172,8 +172,7 @@ void EventMgr::EnqueueCallback(se::Stream* stream, std::function func) { // Events are created on demand, and repeatedly reused. There is no // limit placed here on the number of allocated Events. 
if (free_events_.empty()) { - free_events_.push_back(std::make_unique(exec_)); - free_events_.back()->Init(); + free_events_.emplace_back(exec_->CreateEvent().value()); } std::unique_ptr e = std::move(free_events_.back()); diff --git a/tensorflow/core/common_runtime/device_mgr.h b/tensorflow/core/common_runtime/device_mgr.h index 6e8420de1733cb..82688a2311a345 100644 --- a/tensorflow/core/common_runtime/device_mgr.h +++ b/tensorflow/core/common_runtime/device_mgr.h @@ -65,7 +65,7 @@ class DeviceMgr { // Clears given containers of all devices if 'container' is // non-empty. Otherwise, clears default containers of all devices. - virtual void ClearContainers(gtl::ArraySlice containers) const = 0; + virtual void ClearContainers(absl::Span containers) const = 0; virtual int NumDeviceType(const string& type) const = 0; @@ -102,7 +102,7 @@ class DynamicDeviceMgr : public DeviceMgr { string DeviceMappingString() const override; Status LookupDevice(StringPiece name, Device** device) const override; bool ContainsDevice(int64_t device_incarnation) const override; - void ClearContainers(gtl::ArraySlice containers) const override; + void ClearContainers(absl::Span containers) const override; int NumDeviceType(const string& type) const override; int NumDevices() const override; Device* HostCPU() const override; diff --git a/tensorflow/core/common_runtime/direct_session.cc b/tensorflow/core/common_runtime/direct_session.cc index 50285f87b2283c..46a9fd5943fb40 100644 --- a/tensorflow/core/common_runtime/direct_session.cc +++ b/tensorflow/core/common_runtime/direct_session.cc @@ -73,7 +73,6 @@ limitations under the License. #include "tensorflow/core/platform/cpu_info.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mutex.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/profiler/lib/connected_traceme.h" #include "tensorflow/core/profiler/lib/device_profiler_session.h" diff --git a/tensorflow/core/common_runtime/direct_session.h b/tensorflow/core/common_runtime/direct_session.h index 6421862af65af0..a755befa5bf0d8 100644 --- a/tensorflow/core/common_runtime/direct_session.h +++ b/tensorflow/core/common_runtime/direct_session.h @@ -241,8 +241,8 @@ class DirectSession : public Session { // Retrieves an already existing set of executors to run 'inputs' and // 'outputs', or creates and caches them for future use. ::tensorflow::Status GetOrCreateExecutors( - gtl::ArraySlice inputs, gtl::ArraySlice outputs, - gtl::ArraySlice target_nodes, + absl::Span inputs, absl::Span outputs, + absl::Span target_nodes, ExecutorsAndKeys** executors_and_keys, RunStateArgs* run_state_args); // Creates a set of executors to run the subgraph defined by diff --git a/tensorflow/core/common_runtime/direct_session_test.cc b/tensorflow/core/common_runtime/direct_session_test.cc index 7ba65fe96edd22..a25ef55e21d810 100644 --- a/tensorflow/core/common_runtime/direct_session_test.cc +++ b/tensorflow/core/common_runtime/direct_session_test.cc @@ -68,9 +68,9 @@ limitations under the License. 
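Editorial aside, not part of the patch: the EventMgr hunk above keeps its existing free-list of events but now obtains fresh ones from the executor's CreateEvent() factory instead of constructing and Init()-ing them directly. A generic sketch of that allocate-on-demand, reuse-forever pool pattern (the Event type and CreateEvent() below are illustrative stand-ins, not the StreamExecutor API):

#include <memory>
#include <utility>
#include <vector>

struct Event {};  // stand-in for a device event

std::unique_ptr<Event> CreateEvent() { return std::make_unique<Event>(); }

class EventPool {
 public:
  // Hands out an event, allocating only when the free list is empty.
  std::unique_ptr<Event> Acquire() {
    if (free_.empty()) free_.push_back(CreateEvent());
    std::unique_ptr<Event> e = std::move(free_.back());
    free_.pop_back();
    return e;
  }

  // Returns an event to the pool for later reuse; no cap is placed on size.
  void Release(std::unique_ptr<Event> e) { free_.push_back(std::move(e)); }

 private:
  std::vector<std::unique_ptr<Event>> free_;
};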
namespace tensorflow { namespace { -CallableOptions MakeCallableOptions(gtl::ArraySlice feeds, - gtl::ArraySlice fetches, - gtl::ArraySlice targets) { +CallableOptions MakeCallableOptions(absl::Span feeds, + absl::Span fetches, + absl::Span targets) { CallableOptions ret; for (const string& feed : feeds) { ret.add_feed(feed); @@ -1444,7 +1444,7 @@ TEST(DirectSessionTest, SessionSyncRun_DeepGraph) { std::vector nodes; nodes.reserve(1024); - auto make_expensive_noop = [&g](gtl::ArraySlice control_deps) { + auto make_expensive_noop = [&g](absl::Span control_deps) { Node* ret; auto builder = NodeBuilder(g.NewName("N"), "ExpensiveNoop"); for (Node* control_dep : control_deps) { @@ -2850,7 +2850,7 @@ class DirectSessionCollectiveTest : public ::testing::Test { {{"group_size", 2}, {"group_key", 1}, {"instance_key", instance_key}, - {"subdiv_offsets", gtl::ArraySlice({0})}, + {"subdiv_offsets", absl::Span({0})}, {"merge_op", "Add"}, {"final_op", "Div"}, {"T", DT_FLOAT}}, diff --git a/tensorflow/core/common_runtime/dynamic_device_mgr.cc b/tensorflow/core/common_runtime/dynamic_device_mgr.cc index 325bbfd97b9849..06ccc121440394 100644 --- a/tensorflow/core/common_runtime/dynamic_device_mgr.cc +++ b/tensorflow/core/common_runtime/dynamic_device_mgr.cc @@ -127,7 +127,7 @@ bool DynamicDeviceMgr::ContainsDevice(int64_t device_incarnation) const { } void DynamicDeviceMgr::ClearContainers( - gtl::ArraySlice containers) const { + absl::Span containers) const { Status s; tf_shared_lock l(devices_mu_); for (const auto& it : dynamic_devices_) { diff --git a/tensorflow/core/common_runtime/eager/context_distributed_manager.cc b/tensorflow/core/common_runtime/eager/context_distributed_manager.cc index 535427558d36f5..35c583927433de 100644 --- a/tensorflow/core/common_runtime/eager/context_distributed_manager.cc +++ b/tensorflow/core/common_runtime/eager/context_distributed_manager.cc @@ -351,11 +351,9 @@ absl::Status CreateClientOnce( } if (use_creation_info) { - auto memory_spaces = xla::BuildMemorySpaces(pjrt_devices); std::unique_ptr pjrt_client = std::make_unique( platform_name, info->local_client, std::move(pjrt_devices), - std::move(memory_spaces), /*process_index=*/node_id, /*allocator=*/std::move(info->allocator), /*host_memory_allocator=*/std::move(info->host_memory_allocator), diff --git a/tensorflow/core/common_runtime/eager/custom_device_op_handler.cc b/tensorflow/core/common_runtime/eager/custom_device_op_handler.cc index e3e7f455b01123..34b89b4da61eb0 100644 --- a/tensorflow/core/common_runtime/eager/custom_device_op_handler.cc +++ b/tensorflow/core/common_runtime/eager/custom_device_op_handler.cc @@ -67,7 +67,7 @@ Status CustomDeviceOpHandler::Execute(ImmediateExecutionOperation* op, } // The op will be placed on physical device. However, it contains custom - // device tensor handles. The tensor handles will be copy to physical device + // device tensor handles. The tensor handles will be copied to physical device // first. if (op->HasCustomDeviceInput()) { auto inputs = op->GetInputs(); diff --git a/tensorflow/core/common_runtime/executor.cc b/tensorflow/core/common_runtime/executor.cc index 026719175535f5..46b44f7dc86935 100644 --- a/tensorflow/core/common_runtime/executor.cc +++ b/tensorflow/core/common_runtime/executor.cc @@ -70,7 +70,6 @@ limitations under the License. 
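Editorial aside, not part of the patch: the signature changes above and below migrate gtl::ArraySlice parameters to absl::Span (the element types were dropped in this rendering of the diff; in TensorFlow these are typically absl::Span<const string> and similar). A small sketch of the call-site behavior, assuming a standard Abseil dependency:

#include <cstddef>
#include <string>
#include <vector>
#include "absl/types/span.h"

// absl::Span is a non-owning view over contiguous elements, so callers can
// pass vectors, arrays, or brace-initialized lists without copying.
size_t CountNonEmpty(absl::Span<const std::string> names) {
  size_t count = 0;
  for (const std::string& name : names) {
    if (!name.empty()) ++count;
  }
  return count;
}

void Demo() {
  std::vector<std::string> feeds = {"x:0", "", "y:0"};
  CountNonEmpty(feeds);           // implicit conversion from std::vector
  CountNonEmpty({"a:0", "b:0"});  // from an initializer list (const element type)
}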
#include "tensorflow/core/platform/profile_utils/cpu_utils.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/thread_annotations.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/profiler/lib/annotated_traceme.h" #include "tensorflow/core/profiler/lib/connected_traceme.h" @@ -82,6 +81,7 @@ limitations under the License. #include "tensorflow/core/util/determinism.h" #include "tensorflow/core/util/managed_stack_trace.h" #include "tensorflow/core/util/tensor_slice_reader_cache.h" +#include "tsl/platform/tracing.h" namespace tensorflow { @@ -381,7 +381,7 @@ class ExecutorState { // Step-local container. ScopedStepContainer* step_container_; StepStatsCollectorInterface* const stats_collector_; - const tracing::EventCollector* const event_collector_; + const tsl::tracing::EventCollector* const event_collector_; Context context_; // QUESTION: Make it a checkpoint::TensorSliceReaderCacheWrapper @@ -435,8 +435,8 @@ ExecutorState::ExecutorState( tensor_store_(args.tensor_store), step_container_(args.step_container), stats_collector_(args.stats_collector), - event_collector_( - tracing::GetEventCollector(tracing::EventCategory::kCompute)), + event_collector_(tsl::tracing::GetEventCollector( + tsl::tracing::EventCategory::kCompute)), context_(ContextKind::kThread), slice_reader_cache_(new checkpoint::TensorSliceReaderCacheWrapper), call_frame_(args.call_frame), @@ -563,22 +563,23 @@ struct ExecutorState::AsyncState { // Returns true if `item` might be traced by the given trace and event // collectors. Returns false only if `item` definitely will not be traced. -bool MightTrace(const tracing::EventCollector* event_collector, +bool MightTrace(const tsl::tracing::EventCollector* event_collector, bool is_expensive) { // Tracing will only be enabled if either `event_collector` is non null, // or `trace_collector` is non-null and enabled for this particular kernel. // Although `profiler::TraceMe`, `profiler::ScopedAnnotation`, and - // `tracing::ScopedRegion` check subsets of these properties internally in - // their constructors, the cost of passing the necessary arguments to them can - // be significant, so we avoid constructing them in the common case (when we - // know they will not be used). + // `tsl::tracing::ScopedRegion` check subsets of these properties internally + // in their constructors, the cost of passing the necessary arguments to them + // can be significant, so we avoid constructing them in the common case (when + // we know they will not be used). 
if (event_collector != nullptr) { return true; } if (tsl::profiler::ScopedAnnotation::IsEnabled()) return true; - return profiler::TraceMe::Active(profiler::GetTFTraceMeLevel(is_expensive)); + return tsl::profiler::TraceMe::Active( + tsl::profiler::GetTFTraceMeLevel(is_expensive)); } template @@ -594,14 +595,14 @@ Status ExecutorState::ProcessSync( const bool is_expensive = kernel_stats_->IsExpensive(item); if (TF_PREDICT_FALSE(MightTrace(event_collector_, is_expensive))) { - tracing::ScopedRegion region(tracing::EventCategory::kCompute, - op_kernel->name_view()); + tsl::tracing::ScopedRegion region(tsl::tracing::EventCategory::kCompute, + op_kernel->name_view()); profiler::AnnotatedTraceMe activity( [op_kernel, &ctx] { return op_kernel->TraceString( - ctx, /*verbose=*/profiler::TfOpDetailsEnabled()); + ctx, /*verbose=*/tsl::profiler::TfOpDetailsEnabled()); }, - profiler::GetTFTraceMeLevel(is_expensive)); + tsl::profiler::GetTFTraceMeLevel(is_expensive)); device->Compute(op_kernel, &ctx); } else if (kernel_stats_->HasExpensiveMarker(item)) { KernelTimer timer; @@ -641,9 +642,9 @@ void ExecutorState::ProcessAsync( profiler::AnnotatedTraceMe activity( [async_kernel, state] { return async_kernel->TraceString( - state->ctx, /*verbose=*/profiler::TfOpDetailsEnabled()); + state->ctx, /*verbose=*/tsl::profiler::TfOpDetailsEnabled()); }, - profiler::GetTFTraceMeLevel(/*is_expensive=*/false)); + tsl::profiler::GetTFTraceMeLevel(/*is_expensive=*/false)); // Trace async op start. profiler::TraceMeProducer producer( @@ -654,7 +655,7 @@ void ExecutorState::ProcessAsync( {"kernel_type", async_kernel->type_string()}, {"step_id", step_id_}}); }, - profiler::ContextType::kTfExecutor); + tsl::profiler::ContextType::kTfExecutor); auto done = [this, state, activity_id, ctx_id = producer.GetContextId()]() { // Trace async op done. @@ -666,7 +667,7 @@ void ExecutorState::ProcessAsync( {"kernel_type", state->item->kernel->type_string()}, {"step_id", step_id_}}); }, - profiler::ContextType::kTfExecutor, ctx_id); + tsl::profiler::ContextType::kTfExecutor, ctx_id); Device* device = immutable_state_.params().device; NodeExecStatsInterface* stats = state->stats; // Shorthand @@ -728,8 +729,8 @@ void ExecutorState::ProcessConstTensor( template void ExecutorState::Process(const TaggedNode& tagged_node, int64_t scheduled_nsec) { - profiler::TraceMe traceme("ExecutorState::Process Scheduled", - profiler::TraceMeLevel::kVerbose); + tsl::profiler::TraceMe traceme("ExecutorState::Process Scheduled", + tsl::profiler::TraceMeLevel::kVerbose); TaggedNodeReadyQueue inline_ready; inline_ready.push_back(tagged_node); return ProcessInline(&inline_ready, scheduled_nsec); @@ -824,8 +825,8 @@ void ExecutorState::ProcessInline( "ExecutorState::Process", {{"id", step_id_}, {"iter_num", tagged_node.get_iter_num()}}); }, - profiler::ContextType::kTfExecutor, trace_id_, - profiler::TraceMeLevel::kInfo); + tsl::profiler::ContextType::kTfExecutor, trace_id_, + tsl::profiler::TraceMeLevel::kInfo); last_iter_num = current_iter_num; } inline_ready->pop_front(); @@ -1265,7 +1266,7 @@ bool ExecutorState::NodeDone( template void ExecutorState::ScheduleReady( TaggedNodeSeq* ready, TaggedNodeReadyQueue* inline_ready) { - profiler::TraceMe activity( + tsl::profiler::TraceMe activity( [&]() { return strings::StrCat( "ExecutorState::ScheduleReady#", @@ -1273,7 +1274,7 @@ void ExecutorState::ScheduleReady( ",inline_ready_size=", (inline_ready == nullptr ? 
-1 : inline_ready->size()), "#"); }, - profiler::GetTFTraceMeLevel(/*is_expensive=*/false)); + tsl::profiler::GetTFTraceMeLevel(/*is_expensive=*/false)); DCHECK(!ready->empty()); int64_t scheduled_nsec = 0; @@ -1352,14 +1353,14 @@ void ExecutorState::ScheduleReady( TaggedNodeSeq ready_chunk{it, end}; RunTask( [this, ready_chunk = std::move(ready_chunk), scheduled_nsec]() { - profiler::TraceMe activity( + tsl::profiler::TraceMe activity( [&]() { return strings::StrCat( "ExecutorState::ScheduleReady::" "ChildThreadExpensiveNodes#", "ready_chunk_size=", ready_chunk.size(), "#"); }, - profiler::GetTFTraceMeLevel(/*is_expensive=*/false)); + tsl::profiler::GetTFTraceMeLevel(/*is_expensive=*/false)); for (auto& tagged_node : ready_chunk) { RunTask(std::bind(&ExecutorState::Process, this, tagged_node, scheduled_nsec), @@ -1465,8 +1466,8 @@ void ExecutorState::Finish() { return profiler::TraceMeEncode("ExecutorDoneCallback", {{"id", step_id}}); }, - profiler::ContextType::kTfExecutor, trace_id, - profiler::TraceMeLevel::kInfo); + tsl::profiler::ContextType::kTfExecutor, trace_id, + tsl::profiler::TraceMeLevel::kInfo); done_cb(status); }); return; @@ -1488,8 +1489,8 @@ void ExecutorState::Finish() { return profiler::TraceMeEncode("ExecutorDoneCallback", {{"id", step_id}}); }, - profiler::ContextType::kTfExecutor, trace_id, - profiler::TraceMeLevel::kInfo); + tsl::profiler::ContextType::kTfExecutor, trace_id, + tsl::profiler::TraceMeLevel::kInfo); done_cb(status); }); }); @@ -1503,8 +1504,8 @@ void ExecutorState::Finish() { return profiler::TraceMeEncode("ExecutorDoneCallback", {{"id", step_id}}); }, - profiler::ContextType::kTfExecutor, trace_id, - profiler::TraceMeLevel::kInfo); + tsl::profiler::ContextType::kTfExecutor, trace_id, + tsl::profiler::TraceMeLevel::kInfo); done_cb(status); }); } diff --git a/tensorflow/core/common_runtime/executor_test.cc b/tensorflow/core/common_runtime/executor_test.cc index 6a847b3f1b2f9f..cb683e21a707f9 100644 --- a/tensorflow/core/common_runtime/executor_test.cc +++ b/tensorflow/core/common_runtime/executor_test.cc @@ -46,7 +46,6 @@ limitations under the License. 
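Editorial aside, not part of the patch: the executor changes above move the tracing and profiling helpers to their tsl::tracing / tsl::profiler homes; the MightTrace comment explains the underlying pattern of checking a cheap predicate before constructing any tracing objects at all. A generic sketch of that guard pattern (TracingEnabled and ScopedTracer are illustrative stand-ins, not the TSL profiler API):

#include <iostream>
#include <string>
#include <utility>

// Illustrative stand-ins for a real tracing facility.
bool TracingEnabled() { return false; }  // cheap, branch-predictable check

class ScopedTracer {
 public:
  explicit ScopedTracer(std::string name) : name_(std::move(name)) {
    std::cout << "begin " << name_ << "\n";  // imagine an expensive setup here
  }
  ~ScopedTracer() { std::cout << "end " << name_ << "\n"; }

 private:
  std::string name_;
};

void RunKernel(const std::string& kernel_name) {
  if (TracingEnabled()) {
    // Pay for string formatting and the RAII tracer only when someone can
    // actually observe the trace.
    ScopedTracer trace("Compute:" + kernel_name);
    // ... kernel work ...
  } else {
    // Common case: no tracing objects are constructed at all.
    // ... kernel work ...
  }
}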
#include "tensorflow/core/platform/strcat.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { diff --git a/tensorflow/core/common_runtime/function.cc b/tensorflow/core/common_runtime/function.cc index d4891e6f906138..3cb7aa98a95ec9 100644 --- a/tensorflow/core/common_runtime/function.cc +++ b/tensorflow/core/common_runtime/function.cc @@ -170,13 +170,13 @@ class FunctionLibraryRuntimeOverlay : public FunctionLibraryRuntime { Status GetRetTypes(Handle h, DataTypeVector* ret_types) override; - void Run(const Options& opts, Handle handle, gtl::ArraySlice args, + void Run(const Options& opts, Handle handle, absl::Span args, std::vector* rets, DoneCallback done) override; void Run(const Options& opts, Handle handle, CallFrameInterface* call_frame, DoneCallback done) override; - Status RunSync(Options opts, Handle handle, gtl::ArraySlice args, + Status RunSync(Options opts, Handle handle, absl::Span args, std::vector* rets) override; Status RunSync(Options opts, Handle handle, @@ -240,7 +240,7 @@ Status FunctionLibraryRuntimeOverlay::GetRetTypes(Handle h, } void FunctionLibraryRuntimeOverlay::Run(const Options& opts, Handle handle, - gtl::ArraySlice args, + absl::Span args, std::vector* rets, DoneCallback done) { base_flr_->Run(opts, handle, args, rets, std::move(done)); @@ -253,7 +253,7 @@ void FunctionLibraryRuntimeOverlay::Run(const Options& opts, Handle handle, } Status FunctionLibraryRuntimeOverlay::RunSync(Options opts, Handle handle, - gtl::ArraySlice args, + absl::Span args, std::vector* rets) { return base_flr_->RunSync(std::move(opts), handle, args, rets); } @@ -354,11 +354,11 @@ class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime { Status CreateKernel(const std::shared_ptr& props, OpKernel** kernel) override; - void Run(const Options& opts, Handle handle, gtl::ArraySlice args, + void Run(const Options& opts, Handle handle, absl::Span args, std::vector* rets, DoneCallback done) override; void Run(const Options& opts, Handle handle, CallFrameInterface* frame, DoneCallback done) override; - Status RunSync(Options opts, Handle handle, gtl::ArraySlice args, + Status RunSync(Options opts, Handle handle, absl::Span args, std::vector* rets) override; Status RunSync(Options opts, Handle handle, CallFrameInterface* call_frame) override; @@ -453,7 +453,7 @@ class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime { bool IsLocalTarget(const InstantiateOptions& options) const; AttrValueMap FixAttrs(const AttrSlice& attrs); void RunRemote(const Options& opts, Handle handle, - gtl::ArraySlice args, std::vector* rets, + absl::Span args, std::vector* rets, Item* item, DoneCallback done); // TODO(fishx): Avoid using std::unique_ptr for PrivateIntraProcessRendezvous, @@ -897,50 +897,6 @@ Status FunctionLibraryRuntimeImpl::ReleaseHandle(Handle handle) { return parent_status; } -namespace { - -// Removes all stateless nodes that do not contribute to a return -// value from the function body. Unlike `RemoveDeadNodes()`, which is -// triggered by `OptimizerOptions.do_function_inlining`, this pass -// ignores the SINK node, from which (by definition) all nodes are -// reverse reachable, and preserves all nodes that are reachable from -// control output nodes. 
-// -// TODO(ezhulenev, skyewm): Function body should not have special treatment of -// stateful ops, graph should encode nodes that must execute with `control_ret` -// and `control_output`. -void PruneFunctionBody(const FunctionDef& fdef, Graph* g) { - VLOG(2) << "Pruning function body: function_name=" << fdef.signature().name(); - - // `control_ret` nodes must be always executed. - std::unordered_set control_ret_nodes; - for (const auto& control_ret : fdef.control_ret()) { - control_ret_nodes.insert(control_ret.second); - } - - std::unordered_set nodes; - for (auto n : g->nodes()) { - // NOTE(mrry): "_Retval" nodes are stateful, and so will be added - // to the seed set of `nodes`. "_Arg" nodes are also stateful, but we - // specifically exclude them as seeds, to avoid unconditionally executing - // unused argument nodes (e.g. in a function like `lambda x, y: y`). - // TODO(mrry): Investigate whether the `n->IsControlFlow()` test is - // still needed. It would be preferable to prune entire loops and/or - // conditionals if they are not used in the graph. - if (n->IsControlFlow() || - (n->op_def().is_stateful() && n->type_string() != kArgOp) || - (control_ret_nodes.find(n->name()) != control_ret_nodes.end())) { - nodes.insert(n); - } - } - bool changed = PruneForReverseReachability(g, std::move(nodes)); - if (changed) { - FixupSourceAndSinkEdges(g); - } -} - -} // namespace - Status FunctionLibraryRuntimeImpl::CreateItem(Item** item) { const FunctionBody* fbody; FunctionLibraryRuntime* flr; @@ -1052,7 +1008,7 @@ void FunctionLibraryRuntimeImpl::ExecutorArgsFromOptions( } void FunctionLibraryRuntimeImpl::RunRemote(const Options& opts, Handle handle, - gtl::ArraySlice args, + absl::Span args, std::vector* rets, Item* item, DoneCallback done) { string target_device = parent_->GetDeviceName(handle); @@ -1142,7 +1098,7 @@ void FunctionLibraryRuntimeImpl::RunRemote(const Options& opts, Handle handle, } void FunctionLibraryRuntimeImpl::Run(const Options& opts, Handle handle, - gtl::ArraySlice args, + absl::Span args, std::vector* rets, DoneCallback done) { if (opts.cancellation_manager && opts.cancellation_manager->IsCancelled()) { @@ -1203,7 +1159,7 @@ void FunctionLibraryRuntimeImpl::Run(const Options& opts, Handle handle, return tsl::profiler::TraceMeEncode( "FunctionRun", {{"id", run_opts.step_id}, {"_r", 1}}); }, - profiler::ContextType::kTfExecutor, *exec_args.function_trace_id, + tsl::profiler::ContextType::kTfExecutor, *exec_args.function_trace_id, tsl::profiler::TraceMeLevel::kInfo); bool allow_dead_tensors = run_opts.allow_dead_tensors; @@ -1275,7 +1231,7 @@ void FunctionLibraryRuntimeImpl::Run(const Options& opts, Handle handle, return tsl::profiler::TraceMeEncode("FunctionRun", {{"id", opts.step_id}, {"_r", 1}}); }, - profiler::ContextType::kTfExecutor, *exec_args.function_trace_id, + tsl::profiler::ContextType::kTfExecutor, *exec_args.function_trace_id, tsl::profiler::TraceMeLevel::kInfo); item->exec->RunAsync(exec_args, std::move(done)); @@ -1322,7 +1278,7 @@ Status FunctionLibraryRuntimeImpl::PrepareRunSync( } Status FunctionLibraryRuntimeImpl::RunSync(Options opts, Handle handle, - gtl::ArraySlice args, + absl::Span args, std::vector* rets) { Item* item = nullptr; std::unique_ptr rendezvous; diff --git a/tensorflow/core/common_runtime/function_def_utils.cc b/tensorflow/core/common_runtime/function_def_utils.cc index c4d87114291ac5..6cffeabc9205d4 100644 --- a/tensorflow/core/common_runtime/function_def_utils.cc +++ b/tensorflow/core/common_runtime/function_def_utils.cc @@ -15,18 
+15,26 @@ limitations under the License. #include "tensorflow/core/common_runtime/function_def_utils.h" +#include #include #include +#include "absl/container/flat_hash_set.h" +#include "absl/strings/string_view.h" +#include "absl/types/span.h" #include "tensorflow/core/common_runtime/function_body.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/node_def_util.h" +#include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/control_flow.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/graph_debug_info_builder.h" #include "tensorflow/core/platform/refcount.h" +#include "tensorflow/core/platform/types.h" #include "tsl/platform/errors.h" +#include "tsl/platform/hash.h" +#include "tsl/platform/logging.h" namespace tensorflow { @@ -106,4 +114,64 @@ Status FunctionDefToBodyHelper(const FunctionDef& fdef, const AttrSlice& attrs, get_func_sig, fbody); } +namespace { +bool PrunableStatefulNode(const Node* n) { + // This set contains ops that are marked as "stateful" in their op + // registration, but can be pruned from a function graph if nothing depends + // on them. Typically, these are operations that are "impure" but have no + // side effects. For example, "ResourceGather" reads from a resource variable + // and can produce different results on each invocation (due to variable + // updates) but it does not itself modify the variable. + // TODO(b/341721055): Consolidate this set with other side effect modeling. + static const absl::flat_hash_set* prunable_stateful_ops = + new absl::flat_hash_set{ + FunctionLibraryDefinition::kArgOp, + "ResourceGather", + "ResourceGatherNd", + }; + return prunable_stateful_ops->contains(n->type_string()); +} +} // namespace + +// TODO(ezhulenev, skyewm): Function body should not have special treatment of +// stateful ops, graph should encode nodes that must execute with `control_ret` +// and `control_output`. +void PruneFunctionBody(const FunctionDef& fdef, Graph* g, + absl::Span additional_root_nodes) { + VLOG(2) << "Pruning function body: function_name=" << fdef.signature().name() + << " #nodes = " << g->num_nodes(); + + // `control_ret` nodes must be always executed. + absl::flat_hash_set + control_ret_nodes; + for (const auto& control_ret : fdef.control_ret()) { + control_ret_nodes.insert(control_ret.second); + } + + std::unordered_set nodes; + for (auto n : additional_root_nodes) { + nodes.insert(n); + } + for (auto n : g->nodes()) { + // NOTE(mrry): "_Retval" nodes are stateful, and so will be added + // to the seed set of `nodes`. "_Arg" nodes are also stateful, but we + // specifically exclude them as seeds, to avoid unconditionally executing + // unused argument nodes (e.g. in a function like `lambda x, y: y`). + // TODO(mrry): Investigate whether the `n->IsControlFlow()` test is + // still needed. It would be preferable to prune entire loops and/or + // conditionals if they are not used in the graph. 
+ if (n->IsControlFlow() || + (n->op_def().is_stateful() && !PrunableStatefulNode(n)) || + (control_ret_nodes.find(n->name()) != control_ret_nodes.end())) { + nodes.insert(n); + } + } + bool changed = PruneForReverseReachability(g, std::move(nodes)); + if (changed) { + VLOG(2) << "Pruned function body and changed: function_name=" + << fdef.signature().name() << " #nodes = " << g->num_nodes(); + FixupSourceAndSinkEdges(g); + } +} + } // end namespace tensorflow diff --git a/tensorflow/core/common_runtime/function_def_utils.h b/tensorflow/core/common_runtime/function_def_utils.h index 1d60ce3b38c43c..b5f92660dc7f22 100644 --- a/tensorflow/core/common_runtime/function_def_utils.h +++ b/tensorflow/core/common_runtime/function_def_utils.h @@ -57,6 +57,15 @@ Status FunctionDefToBodyHelper( const std::function& get_func_sig, std::unique_ptr* fbody); +// Removes all stateless nodes that do not contribute to a return +// value from the function body. Unlike `RemoveDeadNodes()`, which is +// triggered by `OptimizerOptions.do_function_inlining`, this pass +// ignores the SINK node, from which (by definition) all nodes are +// reverse reachable, and preserves all nodes that are reachable from +// control output nodes. +void PruneFunctionBody(const FunctionDef& fdef, Graph* g, + absl::Span additional_root_nodes = {}); + } // end namespace tensorflow #endif // TENSORFLOW_CORE_COMMON_RUNTIME_FUNCTION_DEF_UTILS_H_ diff --git a/tensorflow/core/common_runtime/gpu/BUILD b/tensorflow/core/common_runtime/gpu/BUILD index b89d8b549fc465..d5e984dc44b8bd 100644 --- a/tensorflow/core/common_runtime/gpu/BUILD +++ b/tensorflow/core/common_runtime/gpu/BUILD @@ -308,6 +308,7 @@ tf_cuda_cc_test( srcs = [ "gpu_device_test.cc", ], + extra_copts = ["-DXLA_TEST_BACKEND_GPU=1"], # This is a mess features = ["-layering_check"], tags = tf_cuda_tests_tags(), deps = [ @@ -329,6 +330,7 @@ tf_cuda_cc_test( "//tensorflow/core/platform:test", "@local_tsl//tsl/framework:device_id", "@local_xla//xla/stream_executor/gpu:gpu_cudamallocasync_allocator_header", + "@local_xla//xla/tests:test_macros_header", ], ) @@ -391,6 +393,7 @@ tf_cuda_cc_test( "//tensorflow/core/kernels:ops_util", "//tensorflow/core/platform:test", "@local_xla//xla/stream_executor/gpu:gpu_cudamallocasync_allocator_header", + "@local_xla//xla/tests:test_macros_header", ], ) diff --git a/tensorflow/core/common_runtime/gpu/gpu_bfc_allocator_test.cc b/tensorflow/core/common_runtime/gpu/gpu_bfc_allocator_test.cc index 48aba783f1f526..b291186f0fba96 100644 --- a/tensorflow/core/common_runtime/gpu/gpu_bfc_allocator_test.cc +++ b/tensorflow/core/common_runtime/gpu/gpu_bfc_allocator_test.cc @@ -512,18 +512,6 @@ class GPUBFCAllocatorPrivateMethodsTest } } - void TestLog2FloorNonZeroSlow() { - GPUBFCAllocator a(GetParam()(1ull << 32), 1 /* total_memory */, "GPU_0_bfc", - {}); - EXPECT_EQ(-1, a.Log2FloorNonZeroSlow(0)); - EXPECT_EQ(0, a.Log2FloorNonZeroSlow(1)); - EXPECT_EQ(1, a.Log2FloorNonZeroSlow(2)); - EXPECT_EQ(1, a.Log2FloorNonZeroSlow(3)); - EXPECT_EQ(9, a.Log2FloorNonZeroSlow(1023)); - EXPECT_EQ(10, a.Log2FloorNonZeroSlow(1024)); - EXPECT_EQ(10, a.Log2FloorNonZeroSlow(1025)); - } - void TestForceAllowGrowth() { // Unset flag value uses provided option. 
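Aside: a hedged sketch of how a caller uses the new additional_root_nodes parameter, mirroring the call that lower_function_call_op.cc adds later in this patch; the Node* element type of the span is an assumption here:

    // Keep the function's argument nodes alive as additional roots so that the
    // surrounding FunctionBody stays consistent after pruning.
    PruneFunctionBody(fbody->record->fdef(), fbody->graph,
                      absl::Span<Node*>(fbody->arg_nodes.data(),
                                        fbody->arg_nodes.size()));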
unsetenv("TF_FORCE_GPU_ALLOW_GROWTH"); @@ -563,10 +551,6 @@ class GPUBFCAllocatorPrivateMethodsTest TEST_P(GPUBFCAllocatorPrivateMethodsTest, BinDebugInfo) { TestBinDebugInfo(); } -TEST_P(GPUBFCAllocatorPrivateMethodsTest, Log2FloorNonZeroSlow) { - TestLog2FloorNonZeroSlow(); -} - TEST_P(GPUBFCAllocatorPrivateMethodsTest, ForceAllowGrowth) { TestForceAllowGrowth(); } diff --git a/tensorflow/core/common_runtime/gpu/gpu_device.cc b/tensorflow/core/common_runtime/gpu/gpu_device.cc index 16f071c16c37c8..6f8761b4985b23 100644 --- a/tensorflow/core/common_runtime/gpu/gpu_device.cc +++ b/tensorflow/core/common_runtime/gpu/gpu_device.cc @@ -1923,11 +1923,9 @@ Status BaseGPUDeviceFactory::CreateDevices( #else // TENSORFLOW_USE_ROCM auto platform_name = xla::CudaName(); #endif // TENSORFLOW_USE_ROCM - auto memory_spaces = xla::BuildMemorySpaces(pjrt_devices); std::unique_ptr pjrt_client = std::make_unique( platform_name, xla_client, std::move(pjrt_devices), - std::move(memory_spaces), /*process_index=*/numa_node, /*allocator=*/std::move(allocator_adapter), /*host_memory_allocator=*/std::move(pjrt_gpu_host_allocator), diff --git a/tensorflow/core/common_runtime/gpu/gpu_device_test.cc b/tensorflow/core/common_runtime/gpu/gpu_device_test.cc index a68786b8c9343c..f5902c290ba6bb 100644 --- a/tensorflow/core/common_runtime/gpu/gpu_device_test.cc +++ b/tensorflow/core/common_runtime/gpu/gpu_device_test.cc @@ -23,6 +23,7 @@ limitations under the License. #include "xla/stream_executor/gpu/gpu_cudamallocasync_allocator.h" #include "xla/stream_executor/gpu/gpu_init.h" +#include "xla/tests/test_macros.h" #include "tensorflow/core/common_runtime/gpu/gpu_process_state.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/errors.h" @@ -37,6 +38,11 @@ limitations under the License. #include "tensorflow/core/tfrt/common/pjrt_util.h" #endif // TF_GPU_USE_PJRT +#if GOOGLE_CUDA +// Needed for CUDA_VERSION preprocessor directive +#include "third_party/gpus/cuda/include/cuda.h" +#endif + namespace tensorflow { namespace { diff --git a/tensorflow/core/common_runtime/graph_execution_state.cc b/tensorflow/core/common_runtime/graph_execution_state.cc index c2e115b80c3cb9..744e72fcf75d57 100644 --- a/tensorflow/core/common_runtime/graph_execution_state.cc +++ b/tensorflow/core/common_runtime/graph_execution_state.cc @@ -778,11 +778,11 @@ Status GraphExecutionState::OptimizeGraph( node_names.insert(node->name()); } } - for (const auto& feed : item.feed) { + for (auto& feed : item.feed) { SafeTensorId tensor_id = ParseTensorName(feed.first); if (node_names.find(tensor_id.node()) == node_names.end()) { return errors::InvalidArgument("Invalid feed, no such node in graph: ", - feed.first); + std::move(feed.first)); } } for (const auto& fetch : item.fetch) { diff --git a/tensorflow/core/common_runtime/lower_function_call_op.cc b/tensorflow/core/common_runtime/lower_function_call_op.cc index b05509e5246e9e..c5226b4eefcc85 100644 --- a/tensorflow/core/common_runtime/lower_function_call_op.cc +++ b/tensorflow/core/common_runtime/lower_function_call_op.cc @@ -18,9 +18,11 @@ limitations under the License. 
#include #include "absl/algorithm/container.h" +#include "absl/types/span.h" #include "tensorflow/core/common_runtime/function_def_utils.h" #include "tensorflow/core/common_runtime/inline_function_utils.h" #include "tensorflow/core/common_runtime/lower_function_call_inline_policy.h" +#include "tensorflow/core/config/flag_defs.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/graph_node_util.h" @@ -87,6 +89,20 @@ Status RewriteFunctionCallNode(Node* n, Graph* g, TF_RETURN_IF_ERROR( FunctionDefToBodyHelper(std::move(fdef), n->attrs(), &flib_def, &fbody)); + if (flags::Global().enable_function_pruning_before_inlining.value()) { + // TODO(b/341325107): Enable this path by default and remove the flag. + VLOG(2) << "Pruning enabled before inlining"; + // NOTE(mrry): We pass `fbody->arg_nodes` as an additional set of roots, + // because otherwise the `FunctionBody` state will become inconsistent. + // The unused `Identity` nodes will be colocated with the arguments, and + // pruned in a subsequent pass. + PruneFunctionBody( + fbody->record->fdef(), fbody->graph, + absl::Span(fbody->arg_nodes.data(), fbody->arg_nodes.size())); + } else { + VLOG(2) << "Pruning disabled before inlining"; + } + Status can_inline_function_call = ValidateInlining(n, fbody.get(), inline_options); if (can_inline_function_call.ok()) { diff --git a/tensorflow/core/common_runtime/lower_function_call_op_test.cc b/tensorflow/core/common_runtime/lower_function_call_op_test.cc index 600c422c8f546c..ea3de9500b9d3c 100644 --- a/tensorflow/core/common_runtime/lower_function_call_op_test.cc +++ b/tensorflow/core/common_runtime/lower_function_call_op_test.cc @@ -23,6 +23,7 @@ limitations under the License. #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/graph_runner.h" #include "tensorflow/core/common_runtime/lower_functional_ops.h" +#include "tensorflow/core/config/flag_defs.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" @@ -133,6 +134,95 @@ TEST(LowerFunctionCallTest, InlineFunctionCall) { } } +TEST(LowerFunctionCallTest, InlineFunctionCallAfterPruning) { + flags::Global().enable_function_pruning_before_inlining.reset(true); + using FDH = FunctionDefHelper; + + std::unique_ptr graph(new Graph(OpRegistry::Global())); + + FunctionDefLibrary f_lib_proto; + + // `add` node is not required to compute regular output `o`, but it must + // execute because it is in `control_ret`. + // The `div` node and the unused arguments `j` and `k` should be pruned. 
+ *(f_lib_proto.add_function()) = FDH::Create( + "AddAndMul", {"i: int32", "j: int32", "k: int32", "r: resource"}, + {"o: int32"}, {}, + {{{"add"}, "Add", {"i", "i"}, {{"T", DT_INT32}}}, + {{"div"}, "FloorDiv", {"i", "i"}, {{"T", DT_INT32}}}, + {{"gather"}, + "ResourceGather", + {"r", "i"}, + {{"Tindices", DT_INT32}, {"dtype", DT_FLOAT}}}, + {{"ret"}, "Mul", {"i", "i"}, {{"T", DT_INT32}}}}, + /*ret_def=*/{{"o", "ret:z:0"}}, + /*control_ret_def=*/{{"must_execute", "add"}}); + + // Construct a graph: + // X = Placeholder[dtype=int32] + // Y = Placeholder[dtype=int32] + // Z = Placeholder[dtype=int32] + // R = Placeholder[dtype=resource] + // F = PartitionedCall[f=AddAndMul](a) + // B = Identity(func, ^func) + Scope root = Scope::NewRootScope().ExitOnError(); + TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto)); + auto x = ops::Placeholder(root.WithOpName("X"), DT_INT32); + auto y = ops::Placeholder(root.WithOpName("Y"), DT_INT32); + auto z = ops::Placeholder(root.WithOpName("Z"), DT_INT32); + auto r = ops::Placeholder(root.WithOpName("R"), DT_RESOURCE); + Node* function_call; + std::vector inputs( + {NodeBuilder::NodeOut(x.node()), NodeBuilder::NodeOut(y.node()), + NodeBuilder::NodeOut(z.node()), NodeBuilder::NodeOut(r.node())}); + TF_ASSERT_OK(NodeBuilder("F", "PartitionedCall", &root.graph()->flib_def()) + .Input(inputs) + .Attr("Tin", {DT_INT32, DT_INT32, DT_INT32, DT_RESOURCE}) + .Attr("Tout", {DT_INT32}) + .Attr("f", FuncAttr("AddAndMul")) + .Finalize(root.graph(), &function_call)); + TF_ASSERT_OK(root.DoShapeInference(function_call)); + + auto b = ops::Identity(root.WithOpName("B"), Output(function_call, 0)); + root.graph()->AddControlEdge(function_call, b.node()); + + TF_ASSERT_OK(root.ToGraph(graph.get())); + TF_ASSERT_OK(Rewrite(&graph)); + + // Verify the resultant graph has no PartitionedCall ops and function body was + // inlined into the main graph. + int partitioned_call_count = 0; + int add_count = 0; + int mul_count = 0; + int floor_div_count = 0; + int resource_gather_count = 0; + for (const auto* op : graph->op_nodes()) { + if (op->IsPartitionedCall()) partitioned_call_count++; + if (op->type_string() == "Add") add_count++; + if (op->type_string() == "Mul") mul_count++; + if (op->type_string() == "FloorDiv") floor_div_count++; + if (op->type_string() == "ResourceGather") resource_gather_count++; + } + + ASSERT_EQ(partitioned_call_count, 0); + ASSERT_EQ(add_count, 1); + ASSERT_EQ(mul_count, 1); + ASSERT_EQ(floor_div_count, 0); + ASSERT_EQ(resource_gather_count, 0); + + // Verify execution. + ClientSession session(root, SessionOptionsWithInlining()); + { + ClientSession::FeedType feeds; + feeds.emplace(Output(x.node()), Input::Initializer(10)); + std::vector out_tensors; + TF_ASSERT_OK(session.Run(feeds, {Output(b)}, &out_tensors)); + EXPECT_EQ(out_tensors.size(), 1); + EXPECT_EQ(out_tensors[0].scalar()(), 100); + } + flags::Global().enable_function_pruning_before_inlining.reset(false); +} + TEST(LowerFunctionCallTest, DoNotInlineTpuOrXlaFunctions) { std::unique_ptr graph(new Graph(OpRegistry::Global())); diff --git a/tensorflow/core/common_runtime/lower_while_op_test.cc b/tensorflow/core/common_runtime/lower_while_op_test.cc index e1237cd5555b5f..b57145c73167ff 100644 --- a/tensorflow/core/common_runtime/lower_while_op_test.cc +++ b/tensorflow/core/common_runtime/lower_while_op_test.cc @@ -13,10 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ +#include +#include + #include #include "absl/strings/match.h" #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/framework/ops.h" +#include "tensorflow/cc/framework/scope.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/control_flow_ops_internal.h" #include "tensorflow/cc/ops/function_ops.h" @@ -24,15 +28,18 @@ limitations under the License. #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/graph_runner.h" #include "tensorflow/core/common_runtime/lower_functional_ops.h" +#include "tensorflow/core/config/flag_defs.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" +#include "tsl/lib/core/status_test_util.h" namespace tensorflow { namespace { @@ -173,6 +180,65 @@ TEST(LowerWhileOpTest, Simple) { } } +static void DanglingNodeTestHelper(int expected_count) { + std::unique_ptr graph(new Graph(OpRegistry::Global())); + + // Add test functions for cond and body. + FunctionDefLibrary f_lib_proto; + *f_lib_proto.add_function() = + test::function::XTimesTwoWithDanglingFloorDivNode(); + *f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8); + + Scope root = Scope::NewRootScope().ExitOnError(); + TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto)); + auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32); + Node* while_node; + std::vector inputs({NodeBuilder::NodeOut(a.node())}); + AttrValue cond_func; + cond_func.mutable_func()->set_name("LessThanOrEqualToN"); + AttrValue body_func; + body_func.mutable_func()->set_name("XTimesTwoWithDanglingFloorDivNode"); + TF_ASSERT_OK( + NodeBuilder("while", "While", &root.graph()->flib_def()) + .Input(inputs) + .Attr("T", {DT_INT32}) + .Attr("cond", cond_func) + .Attr("body", body_func) + .Attr("parallel_iterations", 100) + .Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true) + .Finalize(root.graph(), &while_node)); + auto c = ops::Identity( + root.WithOpName("C").WithControlDependencies(Output(while_node)), + Output(while_node)); + TF_ASSERT_OK(root.DoShapeInference(while_node)); + TF_ASSERT_OK(root.ToGraph(graph.get())); + + TF_ASSERT_OK(Rewrite(&graph)); + + int mul_count = 0; + int floor_div_count = 0; + + for (const auto* op : graph->op_nodes()) { + if (op->type_string() == "Mul") { + mul_count++; + } + if (op->type_string() == "FloorDiv") { + floor_div_count++; + } + } + + ASSERT_EQ(mul_count, 1); + ASSERT_EQ(floor_div_count, expected_count); +} + +TEST(LowerWhileOpTest, DanglingNode) { DanglingNodeTestHelper(1); } + +TEST(LowerWhileOpTest, DanglingNodeWithPruning) { + flags::Global().enable_function_pruning_before_inlining.reset(true); + DanglingNodeTestHelper(0); + flags::Global().enable_function_pruning_before_inlining.reset(false); +} + TEST(LowerWhileOpTest, ForwardAssignedInputDevice) { std::unique_ptr graph(new Graph(OpRegistry::Global())); diff --git a/tensorflow/core/common_runtime/next_pluggable_device/BUILD b/tensorflow/core/common_runtime/next_pluggable_device/BUILD index 
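Aside: both new tests opt into the pruning behavior the same way, by flipping the flag around the lowering pass and restoring it afterwards. A minimal sketch; the helper name is hypothetical and not part of the patch:

    #include <functional>
    #include "tensorflow/core/config/flag_defs.h"

    // Run `body` with pre-inlining function pruning enabled, then restore the
    // default so other tests are unaffected.
    void WithFunctionPruningBeforeInlining(const std::function<void()>& body) {
      tensorflow::flags::Global()
          .enable_function_pruning_before_inlining.reset(true);
      body();
      tensorflow::flags::Global()
          .enable_function_pruning_before_inlining.reset(false);
    }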
41fd6207bd188d..58ccde7524408f 100644 --- a/tensorflow/core/common_runtime/next_pluggable_device/BUILD +++ b/tensorflow/core/common_runtime/next_pluggable_device/BUILD @@ -304,6 +304,7 @@ cc_library( "//tensorflow/c:kernels_hdrs", "//tensorflow/c:tf_status_helper", "//tensorflow/core/common_runtime/next_pluggable_device:flags", + "@com_google_absl//absl/flags:flag", ], ) diff --git a/tensorflow/core/common_runtime/next_pluggable_device/plugin_coordination_service_agent_helper.h b/tensorflow/core/common_runtime/next_pluggable_device/plugin_coordination_service_agent_helper.h index 5d2bc60d74fc4f..a5adfa50656039 100644 --- a/tensorflow/core/common_runtime/next_pluggable_device/plugin_coordination_service_agent_helper.h +++ b/tensorflow/core/common_runtime/next_pluggable_device/plugin_coordination_service_agent_helper.h @@ -18,6 +18,7 @@ limitations under the License. #include +#include "absl/flags/flag.h" #include "tensorflow/c/kernels.h" #include "tensorflow/c/tf_status_helper.h" #include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h" diff --git a/tensorflow/core/common_runtime/optimized_function_graph_info.h b/tensorflow/core/common_runtime/optimized_function_graph_info.h index b2bd9af5bb1c5a..dd05b026ebfed2 100644 --- a/tensorflow/core/common_runtime/optimized_function_graph_info.h +++ b/tensorflow/core/common_runtime/optimized_function_graph_info.h @@ -71,9 +71,10 @@ struct OptimizedFunctionGraphInfo { OptimizedFunctionGraphInfo(OptimizedFunctionGraphInfo& info) = delete; OptimizedFunctionGraphInfo& operator=(OptimizedFunctionGraphInfo& info) = delete; - OptimizedFunctionGraphInfo(OptimizedFunctionGraphInfo&& info) = default; - OptimizedFunctionGraphInfo& operator=(OptimizedFunctionGraphInfo&& info) = + OptimizedFunctionGraphInfo(OptimizedFunctionGraphInfo&& info) noexcept = default; + OptimizedFunctionGraphInfo& operator=( + OptimizedFunctionGraphInfo&& info) noexcept = default; // Converts from the struct to OptimizedFunctionGraph proto. static OptimizedFunctionGraph ToProto(const OptimizedFunctionGraphInfo& info); diff --git a/tensorflow/core/common_runtime/placer.cc b/tensorflow/core/common_runtime/placer.cc index 05dc029cc74756..2d03de9f2434bc 100644 --- a/tensorflow/core/common_runtime/placer.cc +++ b/tensorflow/core/common_runtime/placer.cc @@ -27,6 +27,7 @@ limitations under the License. #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/graph_node_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/path.h" @@ -117,6 +118,42 @@ bool IsGeneratorNode(const Node* node) { !IsRefType(node->output_type(0)); } +// If a node is an Identity op with input and output on the same device, +// assign this Identity the same device. If the node already has a requested +// or assigned device, don't touch it. +bool MatchIdentityOperation(const Node* node) { + if (!node) { + return false; + } + + if (!node->IsIdentity()) { + return false; + } + + if (node->has_assigned_device_name()) { + return false; + } + + if (!node->requested_device().empty()) { + return false; + } + + // Strictly only check for IDENTITY nodes with only 1 input and + // 1 output edge. 
+ if (node->in_edges().size() != 1) { + return false; + } + + if (node->out_edges().size() != 1) { + return false; + } + + const Node* input = *node->in_nodes().begin(); + const Node* output = *node->out_nodes().begin(); + + return input->requested_device() == output->requested_device(); +} + void LogDeviceAssignment(const Node* node, bool log_device_placement) { // Log placement if log_device_placement is set. if (log_device_placement) { @@ -254,10 +291,10 @@ Status Placer::Run(const GraphOptimizationPassOptions& options) { // to perform good placement we can add an interface for this. int assigned_device = -1; - // Heuristic B: If the node only operates on metadata, not data, - // then it is desirable to place that metadata node with its + // Heuristic B: If the node only operates on metadata (not data) or is + // an identity node, then it is desirable to place that node with its // input. - if (IsMetadata(node)) { + if (IsMetadata(node) || MatchIdentityOperation(node)) { // Make sure that the input device type is in the list of supported // device types for this node. const Node* input = (*node->in_edges().begin())->src(); diff --git a/tensorflow/core/common_runtime/placer_test.cc b/tensorflow/core/common_runtime/placer_test.cc index aeca73546468c0..6d76bc7d12c86e 100644 --- a/tensorflow/core/common_runtime/placer_test.cc +++ b/tensorflow/core/common_runtime/placer_test.cc @@ -213,6 +213,8 @@ REGISTER_OP("TestTypedConsumer").Input("i: variant"); REGISTER_KERNEL_BUILDER(Name("TestTypedConsumer").Device("FakeCPU"), DummyOp); REGISTER_KERNEL_BUILDER(Name("TestTypedConsumer").Device("FakeGPU"), DummyOp); +REGISTER_OP("ConvertToListOfCooTensorsV2").Input("i: int32"); + //////////////////////////////////////////////////////////////////////////////// // // A PlacerTest method has three phases: @@ -1948,6 +1950,9 @@ REGISTER_KERNEL_BUILDER(Name("Add").Device("FakeCPU"), DummyOp); REGISTER_KERNEL_BUILDER(Name("Add").Device("FakeGPU"), DummyOp); REGISTER_KERNEL_BUILDER(Name("PartitionedCall").Device("FakeCPU"), DummyOp); REGISTER_KERNEL_BUILDER(Name("PartitionedCall").Device("FakeGPU"), DummyOp); +REGISTER_KERNEL_BUILDER(Name("ConvertToListOfCooTensorsV2").Device("FakeCPU"), + DummyOp); +REGISTER_KERNEL_BUILDER(Name("Cast").Device("FakeCPU"), DummyOp); TEST_P(SoftPlacementPlacerTest, RequestedDeviceOnResourceGeneratorIsTreatedAsAssigned) { @@ -3108,5 +3113,126 @@ TEST_F(NestedPlacerTest, IndirectRecursion) { << s.ToString(); } +TEST_F(PlacerTest, IdentityMatchesInputAndOutputPlacement) { + /* + * Op Input (assigned to task:1) + * | + * v + * // Tests that this gets reassigned to task:1 + * Identity (No Assignment) + * | + * v + * Op Output (assigned to task:1) + */ + const std::string task0_device = "/job:b/replica:0/task:0/device:FakeCPU:0"; + const std::string task1_device = "/job:b/replica:0/task:1/device:FakeCPU:0"; + + GraphDef graph = GDef({ + NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}, task1_device), + NDef("identity1", "Identity", {"a"}, {{"T", DT_FLOAT}}, task1_device), + NDef("identity2", "Identity", {"identity1:0"}, {{"T", DT_FLOAT}}), + NDef("cast", "Cast", {"identity2:0"}, + {{"SrcT", DT_FLOAT}, {"DstT", DT_INT32}}, task1_device), + NDef("COO", "ConvertToListOfCooTensorsV2", {"cast:0"}, {{"T", DT_INT32}}, + task1_device), + }); + + Graph g(OpRegistry::Global()); + + DeviceSet multiple_tasks; + std::unique_ptr task0_cpu(FakeDevice::MakeCPU(task0_device)); + multiple_tasks.AddDevice(task0_cpu.get()); + + std::unique_ptr task1_cpu(FakeDevice::MakeCPU(task1_device)); + 
multiple_tasks.AddDevice(task1_cpu.get()); + + TF_ASSERT_OK(BuildGraph(graph, &g)); + + absl::Status s = Place(&g, &multiple_tasks); + TF_ASSERT_OK(s); + + Node* identity2 = GetNodeByName(g, "identity2"); + EXPECT_EQ(identity2->assigned_device_name().c_str(), task1_device); +} + +TEST_F(PlacerTest, IdentityWithoutOutputDoesntCrash) { + /* + * Op Input (assigned to task:1) + * | + * v + * // Tests that this doesn't crash. + * Identity (No output) + */ + const std::string task0_device = "/job:b/replica:0/task:0/device:FakeCPU:0"; + const std::string task1_device = "/job:b/replica:0/task:1/device:FakeCPU:0"; + + GraphDef graph = GDef({ + NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}, task1_device), + NDef("identity1", "Identity", {"a"}, {{"T", DT_FLOAT}}, task1_device), + NDef("identity2", "Identity", {"identity1:0"}, {{"T", DT_FLOAT}}), + }); + + Graph g(OpRegistry::Global()); + + DeviceSet multiple_tasks; + std::unique_ptr task0_cpu(FakeDevice::MakeCPU(task0_device)); + multiple_tasks.AddDevice(task0_cpu.get()); + + std::unique_ptr task1_cpu(FakeDevice::MakeCPU(task1_device)); + multiple_tasks.AddDevice(task1_cpu.get()); + + TF_ASSERT_OK(BuildGraph(graph, &g)); + Node* identity2 = GetNodeByName(g, "identity2"); + const Edge* out_edge = *identity2->out_edges().begin(); + + g.RemoveEdge(out_edge); + + absl::Status s = Place(&g, &multiple_tasks); + TF_ASSERT_OK(s); +} + +TEST_F(PlacerTest, IdentityDoesntMatchWithMultipleOutput) { + /* + * Op Input (assigned to task:1) + * | + * v + * // Tests that identity gets assigned to default task:0 + * Identity (No Assignment) + * | + * v + * Multiple Op Output (assigned to task:1) + */ + const std::string task0_device = "/job:b/replica:0/task:0/device:FakeCPU:0"; + const std::string task1_device = "/job:b/replica:0/task:1/device:FakeCPU:0"; + + GraphDef graph = GDef({ + NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}, task1_device), + NDef("identity1", "Identity", {"a"}, {{"T", DT_FLOAT}}, task1_device), + NDef("identity2", "Identity", {"identity1:0"}, {{"T", DT_FLOAT}}), + NDef("cast", "Cast", {"identity2:0"}, + {{"SrcT", DT_FLOAT}, {"DstT", DT_INT32}}, task1_device), + NDef("COO", "ConvertToListOfCooTensorsV2", {"cast:0"}, {{"T", DT_INT32}}, + task1_device), + NDef("identity3", "Identity", {"identity2:0"}, {{"T", DT_FLOAT}}), + }); + + Graph g(OpRegistry::Global()); + + DeviceSet multiple_tasks; + std::unique_ptr task0_cpu(FakeDevice::MakeCPU(task0_device)); + multiple_tasks.AddDevice(task0_cpu.get()); + + std::unique_ptr task1_cpu(FakeDevice::MakeCPU(task1_device)); + multiple_tasks.AddDevice(task1_cpu.get()); + + TF_ASSERT_OK(BuildGraph(graph, &g)); + + absl::Status s = Place(&g, &multiple_tasks); + TF_ASSERT_OK(s); + + Node* identity2 = GetNodeByName(g, "identity2"); + EXPECT_EQ(identity2->assigned_device_name().c_str(), task0_device); +} + } // namespace } // namespace tensorflow diff --git a/tensorflow/core/common_runtime/process_util.cc b/tensorflow/core/common_runtime/process_util.cc index 53d73952219ecb..e0fa771c4b8280 100644 --- a/tensorflow/core/common_runtime/process_util.cc +++ b/tensorflow/core/common_runtime/process_util.cc @@ -26,9 +26,9 @@ limitations under the License. 
#include "tensorflow/core/platform/byte_order.h" #include "tensorflow/core/platform/cpu_info.h" #include "tensorflow/core/platform/logging.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/util.h" +#include "tsl/platform/tracing.h" namespace tensorflow { @@ -166,14 +166,15 @@ thread::ThreadPool* NewThreadPoolFromSessionOptions( } void SchedClosure(absl::AnyInvocable closure) { - if (!tracing::EventCollector::IsEnabled()) { + if (!tsl::tracing::EventCollector::IsEnabled()) { return Env::Default()->SchedClosure(std::move(closure)); } - uint64 id = tracing::GetUniqueArg(); - tracing::RecordEvent(tracing::EventCategory::kScheduleClosure, id); + uint64 id = tsl::tracing::GetUniqueArg(); + tsl::tracing::RecordEvent(tsl::tracing::EventCategory::kScheduleClosure, id); Env::Default()->SchedClosure([id, closure = std::move(closure)]() mutable { - tracing::ScopedRegion region(tracing::EventCategory::kRunClosure, id); + tsl::tracing::ScopedRegion region(tsl::tracing::EventCategory::kRunClosure, + id); closure(); }); } diff --git a/tensorflow/core/common_runtime/single_threaded_executor_test.cc b/tensorflow/core/common_runtime/single_threaded_executor_test.cc index 04fcd51647efc8..a53e65d7a7a513 100644 --- a/tensorflow/core/common_runtime/single_threaded_executor_test.cc +++ b/tensorflow/core/common_runtime/single_threaded_executor_test.cc @@ -41,7 +41,6 @@ limitations under the License. #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { diff --git a/tensorflow/core/common_runtime/threadpool_device.cc b/tensorflow/core/common_runtime/threadpool_device.cc index cd77afcf53dbd3..a06e9e90b7ba16 100644 --- a/tensorflow/core/common_runtime/threadpool_device.cc +++ b/tensorflow/core/common_runtime/threadpool_device.cc @@ -41,7 +41,6 @@ info. It does not have any negative impact on performance. */ #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/types.h" #include "tensorflow/core/lib/hash/hash.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/util/port.h" diff --git a/tensorflow/core/config/flag_defs.h b/tensorflow/core/config/flag_defs.h index bc91a9bcc4247b..a773fbb1b20c1c 100644 --- a/tensorflow/core/config/flag_defs.h +++ b/tensorflow/core/config/flag_defs.h @@ -61,6 +61,9 @@ class Flags { "propagated during while op lowering to switch/merge ops.") TF_DECLARE_FLAG(enable_tf2min_ici_weight, false, "If true, ici weight optimization will be used in tf2/min.") + // TODO(b/341325107): Make this behavior the default and remove the flag. 
+ TF_DECLARE_FLAG(enable_function_pruning_before_inlining, false, + "If true, functions will be pruned before inlining.") // LINT.ThenChange(//tensorflow/core/config/flags_api_wrapper.cc) }; diff --git a/tensorflow/core/config/flags_api_wrapper.cc b/tensorflow/core/config/flags_api_wrapper.cc index e6c2192523cca9..a4925180c1ef53 100644 --- a/tensorflow/core/config/flags_api_wrapper.cc +++ b/tensorflow/core/config/flags_api_wrapper.cc @@ -55,5 +55,6 @@ PYBIND11_MODULE(flags_pybind, m) { TF_PY_DECLARE_FLAG(enable_aggressive_constant_replication); TF_PY_DECLARE_FLAG(enable_colocation_key_propagation_in_while_op_lowering); TF_PY_DECLARE_FLAG(enable_tf2min_ici_weight) + TF_PY_DECLARE_FLAG(enable_function_pruning_before_inlining) // LINT.ThenChange(//tensorflow/core/config/flag_defs.h) }; diff --git a/tensorflow/core/data/service/snapshot/BUILD b/tensorflow/core/data/service/snapshot/BUILD index 523bfebc44dcb5..40b5cbaa6873aa 100644 --- a/tensorflow/core/data/service/snapshot/BUILD +++ b/tensorflow/core/data/service/snapshot/BUILD @@ -462,7 +462,7 @@ cc_library( tf_cc_test( name = "snapshot_stream_writer_checkpoint_test", - size = "small", + size = "medium", srcs = ["snapshot_stream_writer_checkpoint_test.cc"], deps = [ ":path_utils", diff --git a/tensorflow/core/debug/debug_grpc_io_utils_test.cc b/tensorflow/core/debug/debug_grpc_io_utils_test.cc index 235bdaed604270..3eaf3651126528 100644 --- a/tensorflow/core/debug/debug_grpc_io_utils_test.cc +++ b/tensorflow/core/debug/debug_grpc_io_utils_test.cc @@ -24,7 +24,6 @@ limitations under the License. #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/test.h" -#include "tensorflow/core/platform/tracing.h" namespace tensorflow { diff --git a/tensorflow/core/debug/debug_grpc_testlib.cc b/tensorflow/core/debug/debug_grpc_testlib.cc index 4927caf5a3285a..2bc06061c459f7 100644 --- a/tensorflow/core/debug/debug_grpc_testlib.cc +++ b/tensorflow/core/debug/debug_grpc_testlib.cc @@ -23,7 +23,6 @@ limitations under the License. #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/protobuf.h" -#include "tensorflow/core/platform/tracing.h" namespace tensorflow { diff --git a/tensorflow/core/distributed_runtime/master_session.cc b/tensorflow/core/distributed_runtime/master_session.cc index 5593963988d9e5..41ce2df923cad3 100644 --- a/tensorflow/core/distributed_runtime/master_session.cc +++ b/tensorflow/core/distributed_runtime/master_session.cc @@ -60,10 +60,10 @@ limitations under the License. 
#include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/mutex.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/util/device_name_utils.h" +#include "tsl/platform/tracing.h" #include "tsl/protobuf/coordination_config.pb.h" namespace tensorflow { diff --git a/tensorflow/core/distributed_runtime/rpc/BUILD b/tensorflow/core/distributed_runtime/rpc/BUILD index a9f2370ab81fa1..959a8abae6518a 100644 --- a/tensorflow/core/distributed_runtime/rpc/BUILD +++ b/tensorflow/core/distributed_runtime/rpc/BUILD @@ -241,6 +241,7 @@ cc_library( "//tensorflow/core/profiler/lib:traceme", "//tensorflow/core/protobuf:master_proto_cc", "@com_google_absl//absl/time", + "@local_tsl//tsl/platform:env", "@local_tsl//tsl/platform:retrying_utils", ], alwayslink = 1, diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_master_service.cc b/tensorflow/core/distributed_runtime/rpc/grpc_master_service.cc index abc9d12969147e..de9bd049d0d468 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_master_service.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_master_service.cc @@ -41,7 +41,6 @@ limitations under the License. #include "tensorflow/core/distributed_runtime/rpc/grpc_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/protobuf/master.pb.h" diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_remote_master.cc b/tensorflow/core/distributed_runtime/rpc/grpc_remote_master.cc index c47dc31fc3ef4e..d180b4dc236451 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_remote_master.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_remote_master.cc @@ -27,10 +27,10 @@ limitations under the License. #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/protobuf/master.pb.h" #include "tsl/platform/retrying_utils.h" +#include "tsl/platform/tracing.h" namespace tensorflow { @@ -116,7 +116,7 @@ class GrpcRemoteMaster : public MasterInterface { // Start tracing, attaching a unique ID to both the trace and the RPC. tsl::profiler::TraceMe* NewTraceRpc(StringPiece name, ::grpc::ClientContext* ctx) { - string trace_id = strings::StrCat(tracing::GetUniqueArg()); + string trace_id = strings::StrCat(tsl::tracing::GetUniqueArg()); ctx->AddMetadata(GrpcIdKey(), trace_id); return new tsl::profiler::TraceMe( [&] { return strings::StrCat(name, ":", trace_id); }, diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc b/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc index 05bf31918ef543..d1ad012a28f222 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc @@ -33,7 +33,6 @@ limitations under the License. 
#include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/protobuf/transport_options.pb.h" #include "tensorflow/core/protobuf/worker.pb.h" #include "tensorflow/core/util/env_var.h" diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc index a990f41af261a2..3fac884a73bd11 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc @@ -52,10 +52,10 @@ limitations under the License. #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mutex.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h" #include "tensorflow/core/protobuf/transport_options.pb.h" #include "tensorflow/core/protobuf/worker.pb.h" +#include "tsl/platform/tracing.h" #include "tsl/protobuf/rpc_options.pb.h" namespace tensorflow { diff --git a/tensorflow/core/distributed_runtime/worker.cc b/tensorflow/core/distributed_runtime/worker.cc index 42708852b36f36..0922b04de0b0f8 100644 --- a/tensorflow/core/distributed_runtime/worker.cc +++ b/tensorflow/core/distributed_runtime/worker.cc @@ -27,8 +27,8 @@ limitations under the License. #include "tensorflow/core/distributed_runtime/tensor_coding.h" #include "tensorflow/core/distributed_runtime/worker_session.h" #include "tensorflow/core/framework/collective.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/device_profiler_session.h" +#include "tsl/platform/tracing.h" #include "tsl/protobuf/distributed_runtime_payloads.pb.h" namespace tensorflow { diff --git a/tensorflow/core/framework/BUILD b/tensorflow/core/framework/BUILD index 617404f2722585..2051a3d14ea024 100644 --- a/tensorflow/core/framework/BUILD +++ b/tensorflow/core/framework/BUILD @@ -873,8 +873,8 @@ tf_cuda_library( "//tensorflow/core/platform:tensor_coding", "//tensorflow/core/platform:types", "//tensorflow/core/public:version", - "//tensorflow/core/util:managed_stack_trace", "@com_google_absl//absl/memory", + "@com_google_absl//absl/numeric:bits", "@com_google_absl//absl/strings", "@eigen_archive//:eigen3", "@local_tsl//tsl/framework:device_type", diff --git a/tensorflow/core/framework/dataset.h b/tensorflow/core/framework/dataset.h index effa997eca60f9..394aa9a2d6e86e 100644 --- a/tensorflow/core/framework/dataset.h +++ b/tensorflow/core/framework/dataset.h @@ -60,7 +60,6 @@ limitations under the License. #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/refcount.h" #include "tensorflow/core/platform/status.h" -#include "tensorflow/core/platform/tracing.h" #include "tsl/framework/allocator.h" #include "tsl/platform/errors.h" #include "tsl/platform/thread_annotations.h" diff --git a/tensorflow/core/framework/function_testlib.cc b/tensorflow/core/framework/function_testlib.cc index 7303228b935bbf..ae06188b8bc83a 100644 --- a/tensorflow/core/framework/function_testlib.cc +++ b/tensorflow/core/framework/function_testlib.cc @@ -15,8 +15,11 @@ limitations under the License. 
#include "tensorflow/core/framework/function_testlib.h" +#include + #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/node_def.pb.h" +#include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/lib/core/threadpool.h" @@ -180,6 +183,26 @@ FunctionDef XTimesTwoWithControlOutput() { {{"dummy", "dummy"}}); } +FunctionDef XTimesTwoWithDanglingFloorDivNode() { + const Tensor kTwo = test::AsScalar(2); + return FDH::Define( + // Name + "XTimesTwoWithDanglingFloorDivNode", + // Args + {"x: T"}, + // Return values + {"y: T"}, + // Attr def + {"T: {float, double, int32, int64}"}, + // Nodes + { + {{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}}, + {{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}, + {{"z"}, "FloorDiv", {"x", "scale"}, {{"T", "$T"}}}, + {{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}}, + }); +} + FunctionDef TwoDeviceMult() { const Tensor kTwo = test::AsScalar(2); const Tensor kThree = test::AsScalar(3); diff --git a/tensorflow/core/framework/function_testlib.h b/tensorflow/core/framework/function_testlib.h index 0aba30650d5f84..06e0c3a6d36ca9 100644 --- a/tensorflow/core/framework/function_testlib.h +++ b/tensorflow/core/framework/function_testlib.h @@ -74,6 +74,8 @@ FunctionDef XTimesTwo(); FunctionDef XTimesTwoWithControlInput(); // Same as `XTimesTwo` above, but with a `dummy` control output node. FunctionDef XTimesTwoWithControlOutput(); +// Same as `XTimesTwo` above, but with a dangling `FloorDiv` node. +FunctionDef XTimesTwoWithDanglingFloorDivNode(); // x: T -> cpu(x * 2) + cpu(x * 3). FunctionDef TwoDeviceTimesFive(); diff --git a/tensorflow/core/framework/run_handler.cc b/tensorflow/core/framework/run_handler.cc index 5ff63469ae4aa9..e68025643817d8 100644 --- a/tensorflow/core/framework/run_handler.cc +++ b/tensorflow/core/framework/run_handler.cc @@ -31,8 +31,8 @@ limitations under the License. 
#include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/numa.h" #include "tensorflow/core/platform/setround.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" +#include "tsl/platform/tracing.h" namespace tensorflow { namespace { @@ -67,9 +67,10 @@ RunHandlerEnvironment::EnvThread* RunHandlerEnvironment::CreateThread( RunHandlerEnvironment::Task RunHandlerEnvironment::CreateTask( std::function f) { uint64 id = 0; - if (tracing::EventCollector::IsEnabled()) { - id = tracing::GetUniqueArg(); - tracing::RecordEvent(tracing::EventCategory::kScheduleClosure, id); + if (tsl::tracing::EventCollector::IsEnabled()) { + id = tsl::tracing::GetUniqueArg(); + tsl::tracing::RecordEvent(tsl::tracing::EventCategory::kScheduleClosure, + id); } return Task{ std::unique_ptr(new TaskImpl{ @@ -82,8 +83,8 @@ RunHandlerEnvironment::Task RunHandlerEnvironment::CreateTask( void RunHandlerEnvironment::ExecuteTask(const Task& t) { WithContext wc(t.f->context); - tracing::ScopedRegion region(tracing::EventCategory::kRunClosure, - t.f->trace_id); + tsl::tracing::ScopedRegion region(tsl::tracing::EventCategory::kRunClosure, + t.f->trace_id); t.f->f(); } diff --git a/tensorflow/core/framework/tensor_matcher.cc b/tensorflow/core/framework/tensor_matcher.cc index 5e159eac21d6d7..c16f700d467e31 100644 --- a/tensorflow/core/framework/tensor_matcher.cc +++ b/tensorflow/core/framework/tensor_matcher.cc @@ -41,13 +41,32 @@ namespace { using tensorflow::Tensor; +template +::testing::Matcher> MakePointwiseMatcher( + absl::Span target) { + return ::testing::MatcherCast>( + ::testing::Pointwise(::testing::Eq(), target)); +} + +template <> +::testing::Matcher> MakePointwiseMatcher( + absl::Span target) { + return ::testing::MatcherCast>( + ::testing::Pointwise(::testing::FloatEq(), target)); +} + +template <> +::testing::Matcher> MakePointwiseMatcher( + absl::Span target) { + return ::testing::MatcherCast>( + ::testing::Pointwise(::testing::DoubleEq(), target)); +} + template bool MatchAndExplainPointwise(absl::Span value, absl::Span target, ::testing::MatchResultListener* listener) { - auto matcher = ::testing::MatcherCast>( - ::testing::Pointwise(::testing::Eq(), target)); - return matcher.MatchAndExplain(value, listener); + return MakePointwiseMatcher(target).MatchAndExplain(value, listener); } class TensorEqMatcherImpl : public ::testing::MatcherInterface { @@ -62,9 +81,7 @@ class TensorEqMatcherImpl : public ::testing::MatcherInterface { case tensorflow::DataTypeToEnum::value: { \ *os << ", and tensor data "; \ absl::Span data(target_.unaligned_flat()); \ - ::testing::MatcherCast>( \ - ::testing::Pointwise(::testing::Eq(), data)) \ - .DescribeTo(os); \ + MakePointwiseMatcher(data).DescribeTo(os); \ break; \ } TF_CALL_POD_STRING_TYPES(CASE_TYPE); @@ -84,9 +101,7 @@ class TensorEqMatcherImpl : public ::testing::MatcherInterface { case tensorflow::DataTypeToEnum::value: { \ *os << ", or tensor data "; \ absl::Span data(target_.unaligned_flat()); \ - ::testing::MatcherCast>( \ - ::testing::Pointwise(::testing::Eq(), data)) \ - .DescribeNegationTo(os); \ + MakePointwiseMatcher(data).DescribeNegationTo(os); \ break; \ } TF_CALL_POD_STRING_TYPES(CASE_TYPE); diff --git a/tensorflow/core/framework/tensor_matcher_test.cc b/tensorflow/core/framework/tensor_matcher_test.cc index 7e93bde4c964e8..614ea6e7ae353a 100644 --- a/tensorflow/core/framework/tensor_matcher_test.cc +++ b/tensorflow/core/framework/tensor_matcher_test.cc @@ -14,6 +14,7 @@ limitations under the 
License. ==============================================================================*/ #include "tensorflow/core/framework/tensor_matcher.h" +#include #include #include #include @@ -26,7 +27,9 @@ namespace tensorflow { namespace test { namespace { +using ::testing::DoubleEq; using ::testing::ElementsAre; +using ::testing::FloatEq; TEST(TensorMatcherTest, BasicPod) { std::vector expected; @@ -50,6 +53,34 @@ TEST(TensorMatcherTest, BasicString) { ElementsAre(TensorEq(Tensor(s1)), TensorEq(Tensor(s2)))); } +TEST(TensorMatcherTest, FloatComparisonUsesTolerance) { + // Two floats that are *nearly* equal. + float f1(1); + float f2 = std::nextafter(f1, f1 + 1); + + // Direct equality checks should fail, but use of the specialized `FloatEq` + // should succeed since this matcher applies ULP-based comparison. + // go/matchers#FpMatchers + ASSERT_NE(f1, f2); + ASSERT_THAT(f1, FloatEq(f2)); + + EXPECT_THAT(Tensor(f1), TensorEq(Tensor(f2))); +} + +TEST(TensorMatcherTest, DoubleComparisonUsesTolerance) { + // Two doubles that are *nearly* equal. + double d1(1); + double d2 = std::nextafter(d1, d1 + 1); + + // Direct equality checks should fail, but use of the specialized `DoubleEq` + // should succeed since this matcher applies ULP-based comparison. + // go/matchers#FpMatchers + ASSERT_NE(d1, d2); + ASSERT_THAT(d1, DoubleEq(d2)); + + EXPECT_THAT(Tensor(d1), TensorEq(Tensor(d2))); +} + } // namespace } // namespace test } // namespace tensorflow diff --git a/tensorflow/core/framework/types.h b/tensorflow/core/framework/types.h index 90e148a2e0784b..142ac9913ddc93 100644 --- a/tensorflow/core/framework/types.h +++ b/tensorflow/core/framework/types.h @@ -16,10 +16,12 @@ limitations under the License. #ifndef TENSORFLOW_CORE_FRAMEWORK_TYPES_H_ #define TENSORFLOW_CORE_FRAMEWORK_TYPES_H_ +#include #include #include #include +#include "absl/numeric/bits.h" #include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive #include "tensorflow/core/framework/bfloat16.h" #include "tensorflow/core/framework/full_type.pb.h" @@ -124,7 +126,7 @@ class DataTypeSet { if (pos_ < kNumBits) { uint32 remaining_mask = set_.mask_ >> pos_; if (remaining_mask != 0u) { - pos_ += ctz_uint32(remaining_mask); + pos_ += absl::countr_zero(remaining_mask); } } DCHECK_LE(pos_, kNumBits); @@ -137,39 +139,11 @@ class DataTypeSet { } }; - static uint32 ctz_uint32(uint32 x) { - DCHECK_NE(x, 0u); -#ifdef __GNUC__ - return __builtin_ctz(x); -#else - uint32 n = 0u; - while ((x & 1u) == 0u) { - x >>= 1; - ++n; - } - return n; -#endif - } - - static uint32 clz_uint32(uint32 x) { - DCHECK_NE(x, 0u); -#ifdef __GNUC__ - return __builtin_clz(x); -#else - uint32 n = 0u; - while ((x >> (kNumBits - 1u)) == 0u) { - x <<= 1; - ++n; - } - return n; -#endif - } - Iterator begin() const { // The begin position is the index of the first bit set to 1 in the entire // bit mask. If there are no bits set to 1, then the index is 0. if (mask_ != 0) { - return Iterator(*this, ctz_uint32(mask_)); + return Iterator(*this, absl::countr_zero(mask_)); } // The set is empty. return Iterator(*this, 0); @@ -179,25 +153,13 @@ class DataTypeSet { // The end position is the index of the highest bit that is set, plus 1. // If there are no bits set to 1, then the index is 0. if (mask_ != 0) { - return Iterator(*this, kNumBits - clz_uint32(mask_)); + return Iterator(*this, kNumBits - absl::countl_zero(mask_)); } // The set is empty. 
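Aside on the tensor_matcher change a few hunks above: moving the element-wise comparison from Eq() to FloatEq()/DoubleEq() makes TensorEq tolerant to last-ULP differences for float and double tensors. A self-contained gtest sketch of the underlying Pointwise behavior; the test name is illustrative only:

    #include <cmath>
    #include <vector>
    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    TEST(PointwiseFloatEqSketch, NearlyEqualFloatsMatch) {
      const float a = 1.0f;
      const float b = std::nextafter(a, 2.0f);  // one representable step away
      ASSERT_NE(a, b);                          // exact comparison fails
      const std::vector<float> lhs = {a};
      const std::vector<float> rhs = {b};
      // ULP-based element-wise comparison, as the matcher above now uses.
      EXPECT_THAT(lhs, ::testing::Pointwise(::testing::FloatEq(), rhs));
    }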
return Iterator(*this, 0); } - size_t size() const { -#if defined(__GNUC__) - return __builtin_popcount(mask_); -#else - size_t n = 0; - uint32 x = mask_; - while (x > 0) { - n += x & 1u; - x >>= 1; - } - return n; -#endif - } + size_t size() const { return absl::popcount(mask_); } constexpr DataTypeSet operator|(const DataTypeSet& other) const { return DataTypeSet(mask_ | other.mask_); diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD index 6f0a99f4faa473..95130050bae491 100644 --- a/tensorflow/core/kernels/BUILD +++ b/tensorflow/core/kernels/BUILD @@ -5160,7 +5160,7 @@ tf_kernel_library( tf_kernel_library( name = "sparse_reduce_op", prefix = "sparse_reduce_op", - deps = SPARSE_DEPS, + deps = SPARSE_DEPS + ["@com_google_absl//absl/status"], ) tf_kernel_library( diff --git a/tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h b/tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h index 28e8f4396fd668..9e6a4b2829aa5a 100644 --- a/tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h +++ b/tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h @@ -639,7 +639,7 @@ void AdaptiveSharedBatchScheduler::CallbackWrapper( "ProcessBatch", {{"batch_size_before_padding", batch->size()}, {"_r", 2} /*root_event*/}); }, - profiler::ContextType::kAdaptiveSharedBatchScheduler, + tsl::profiler::ContextType::kAdaptiveSharedBatchScheduler, batch->traceme_context_id()); const int64_t start_time = batch->creation_time_micros(); callback(std::unique_ptr>( @@ -798,7 +798,7 @@ Status ASBSQueue::Schedule(std::unique_ptr* task) { "ASBSQueue::Schedule", {{"batching_input_task_size", task_size}}); }, - profiler::ContextType::kAdaptiveSharedBatchScheduler, + tsl::profiler::ContextType::kAdaptiveSharedBatchScheduler, this->current_batch_->traceme_context_id()); current_batch_->AddTask(std::move(task)); num_enqueued_tasks_++; diff --git a/tensorflow/core/kernels/batching_util/shared_batch_scheduler.h b/tensorflow/core/kernels/batching_util/shared_batch_scheduler.h index 7e638224369e82..500dfc662ee359 100644 --- a/tensorflow/core/kernels/batching_util/shared_batch_scheduler.h +++ b/tensorflow/core/kernels/batching_util/shared_batch_scheduler.h @@ -959,7 +959,7 @@ Status Queue::ScheduleWithLazySplit(std::unique_ptr* task) { return profiler::TraceMeEncode("ScheduleOutputTask", {{"size", task_handles[i]->size()}}); }, - profiler::ContextType::kSharedBatchScheduler, + tsl::profiler::ContextType::kSharedBatchScheduler, task_handle_batches_.back()->traceme_context_id()); task_handle_batches_.back()->AddTask(std::move(task_handles[i])); @@ -1040,7 +1040,7 @@ Status Queue::ScheduleWithoutOrEagerSplitImpl( return profiler::TraceMeEncode("ScheduleOutputTask", {{"size", output_tasks[i]->size()}}); }, - profiler::ContextType::kSharedBatchScheduler, + tsl::profiler::ContextType::kSharedBatchScheduler, batches.back()->traceme_context_id()); batches.back()->AddTask(std::move(output_tasks[i])); } @@ -1332,7 +1332,7 @@ void Queue::ProcessBatch( "ProcessBatch", {{"batch_size_before_padding", batch->size()}, {"_r", 2} /*root_event*/}); }, - profiler::ContextType::kSharedBatchScheduler, + tsl::profiler::ContextType::kSharedBatchScheduler, batch->traceme_context_id()); if (std::holds_alternative( diff --git a/tensorflow/core/kernels/collective_nccl.cc b/tensorflow/core/kernels/collective_nccl.cc index 9a1cb900f3bf11..c44680b27124aa 100644 --- a/tensorflow/core/kernels/collective_nccl.cc +++ b/tensorflow/core/kernels/collective_nccl.cc @@ -18,7 
+18,6 @@ limitations under the License. #include "tensorflow/core/common_runtime/collective_util.h" #include "tensorflow/core/nccl/nccl_manager.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" namespace tensorflow { diff --git a/tensorflow/core/kernels/collective_nccl_all_to_all.cc b/tensorflow/core/kernels/collective_nccl_all_to_all.cc index a24183ee71e40b..a531f19b976cec 100644 --- a/tensorflow/core/kernels/collective_nccl_all_to_all.cc +++ b/tensorflow/core/kernels/collective_nccl_all_to_all.cc @@ -18,7 +18,6 @@ limitations under the License. #include "tensorflow/core/common_runtime/collective_util.h" #include "tensorflow/core/nccl/nccl_manager.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" namespace tensorflow { diff --git a/tensorflow/core/kernels/collective_nccl_broadcaster.cc b/tensorflow/core/kernels/collective_nccl_broadcaster.cc index 12b7f5222a9dcf..d3a2ec9218a002 100644 --- a/tensorflow/core/kernels/collective_nccl_broadcaster.cc +++ b/tensorflow/core/kernels/collective_nccl_broadcaster.cc @@ -18,7 +18,6 @@ limitations under the License. #include "tensorflow/core/common_runtime/collective_util.h" #include "tensorflow/core/nccl/nccl_manager.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" namespace tensorflow { diff --git a/tensorflow/core/kernels/collective_nccl_gatherer.cc b/tensorflow/core/kernels/collective_nccl_gatherer.cc index a029e5bb6604f2..f6d8e63a748877 100644 --- a/tensorflow/core/kernels/collective_nccl_gatherer.cc +++ b/tensorflow/core/kernels/collective_nccl_gatherer.cc @@ -18,7 +18,6 @@ limitations under the License. #include "tensorflow/core/common_runtime/collective_util.h" #include "tensorflow/core/nccl/nccl_manager.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" namespace tensorflow { diff --git a/tensorflow/core/kernels/collective_nccl_reducer.cc b/tensorflow/core/kernels/collective_nccl_reducer.cc index 46fbc2dc12d087..ebc2129f4c4e03 100644 --- a/tensorflow/core/kernels/collective_nccl_reducer.cc +++ b/tensorflow/core/kernels/collective_nccl_reducer.cc @@ -18,7 +18,6 @@ limitations under the License. #include "tensorflow/core/common_runtime/collective_util.h" #include "tensorflow/core/nccl/nccl_manager.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" namespace tensorflow { diff --git a/tensorflow/core/kernels/conv_ops_gpu.cc b/tensorflow/core/kernels/conv_ops_gpu.cc index d1526b44aba08c..60238d5cd39f47 100644 --- a/tensorflow/core/kernels/conv_ops_gpu.cc +++ b/tensorflow/core/kernels/conv_ops_gpu.cc @@ -363,9 +363,9 @@ StatusOr> AutotuneUnfusedConv( return absl::InvalidArgumentError("No DNN in stream executor."); } if (!dnn->GetMIOpenConvolveAlgorithms( - kind, se::dnn::ToDataType::value, stream, input_desc, input_ptr, - filter_desc, filter_ptr, output_desc, output_ptr, conv_desc, - &scratch_allocator, &algorithms)) { + kind, se::dnn::ToDataType::value, se::dnn::ToDataType::value, + stream, input_desc, input_ptr, filter_desc, filter_ptr, output_desc, + output_ptr, conv_desc, &scratch_allocator, &algorithms)) { return errors::Unknown( "Failed to get convolution algorithm. 
This is probably " "because MIOpen failed to initialize, so try looking to " diff --git a/tensorflow/core/kernels/data/experimental/BUILD b/tensorflow/core/kernels/data/experimental/BUILD index f24cff66f1ef98..5adbe7abe25ee8 100644 --- a/tensorflow/core/kernels/data/experimental/BUILD +++ b/tensorflow/core/kernels/data/experimental/BUILD @@ -922,8 +922,9 @@ tf_kernel_library( "//tensorflow/core:framework_internal", "//tensorflow/core:lib", "//tensorflow/core:lib_internal", - "//tensorflow/core/data:dataset_utils", "//tensorflow/core/framework:dataset_options_proto_cc", + "@com_google_absl//absl/status", + "@com_google_absl//absl/strings", "@local_tsl//tsl/platform:errors", "@local_tsl//tsl/platform:mutex", "@local_tsl//tsl/platform:status", diff --git a/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc b/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc index 13173f183a9d8c..d88506b4176a29 100644 --- a/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc +++ b/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc @@ -38,7 +38,6 @@ limitations under the License. #include "tensorflow/core/platform/env_time.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/stringprintf.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/profiler/lib/traceme_encode.h" diff --git a/tensorflow/core/kernels/data/experimental/unbatch_dataset_op.cc b/tensorflow/core/kernels/data/experimental/unbatch_dataset_op.cc index 4b4433ca27b27d..3cc332024328e8 100644 --- a/tensorflow/core/kernels/data/experimental/unbatch_dataset_op.cc +++ b/tensorflow/core/kernels/data/experimental/unbatch_dataset_op.cc @@ -19,6 +19,8 @@ limitations under the License. #include #include +#include "absl/status/status.h" +#include "absl/strings/str_cat.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/dataset_options.pb.h" #include "tensorflow/core/framework/model.h" @@ -122,6 +124,32 @@ class UnbatchDatasetOp : public UnaryDatasetOpKernel { return input_->CheckExternalState(); } + absl::Status Get(OpKernelContext* ctx, int64_t index, + std::vector* out_tensors) const override { + TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index)); + if (batch_size_ <= 0) { + return absl::FailedPreconditionError(absl::StrCat( + "Random access for the `unbatch` dataset requires a known batch " + "size. Got ", + batch_size_, ".")); + } + + const int64_t input_index = index / batch_size_; + const int64_t input_offset = index % batch_size_; + std::vector input_tensors; + TF_RETURN_IF_ERROR(input_->Get(ctx, input_index, &input_tensors)); + for (int64_t i = 0; i < input_tensors.size(); ++i) { + const DataType& dtype = input_tensors[i].dtype(); + TensorShape shape = input_tensors[i].shape(); + shape.RemoveDim(0); + + out_tensors->emplace_back(ctx->get_allocator({}), dtype, shape); + TF_RETURN_IF_ERROR(batch_util::MaybeMoveSliceToElement( + &input_tensors[i], &out_tensors->back(), input_offset)); + } + return absl::OkStatus(); + } + protected: Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b, diff --git a/tensorflow/core/kernels/function_ops.cc b/tensorflow/core/kernels/function_ops.cc index 3f4bb1bb96ed4d..7c865b62d0452b 100644 --- a/tensorflow/core/kernels/function_ops.cc +++ b/tensorflow/core/kernels/function_ops.cc @@ -31,7 +31,6 @@ limitations under the License. 
#include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/platform/macros.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/util/device_name_utils.h" diff --git a/tensorflow/core/kernels/mkl/BUILD b/tensorflow/core/kernels/mkl/BUILD index 2bf1f3406e6c67..4e4885fef167a2 100644 --- a/tensorflow/core/kernels/mkl/BUILD +++ b/tensorflow/core/kernels/mkl/BUILD @@ -95,6 +95,7 @@ tf_mkl_kernel_library( hdrs = [ "mkl_kernel_util.h", "mkl_matmul_ops_common.h", + "mkl_quantized_conv_ops.h", ], deps = [ "//tensorflow/core:graph", @@ -171,6 +172,7 @@ tf_cc_test_mkl( srcs = ["mkl_qmatmul_op_test.cc"], linkstatic = 1, # Fixes dyld error on MacOS. deps = [ + ":mkl_matmul_op", ":mkl_qmatmul_op", "//tensorflow/core:array_ops_op_lib", "//tensorflow/core:math_ops_op_lib", @@ -555,3 +557,21 @@ tf_cc_test_mkl( "//tensorflow/core/kernels/mkl:mkl_softmax_op", ] + MKL_TEST_DEPS, ) + +tf_cc_test_mkl( + name = "onednn_fused_matmul_ops_test", + size = "medium", + srcs = ["onednn_fused_matmul_ops_test.cc"], + linkstatic = 1, # Fixes dyld error on MacOS. + deps = [ + ":mkl_kernel_util", + ":mkl_matmul_op", + "//tensorflow/cc:cc_ops_internal", + "//tensorflow/core:direct_session", + "//tensorflow/core/kernels:bias_op", + "//tensorflow/core/kernels:matmul_op", + "//tensorflow/core/kernels:quantization_utils", + "//tensorflow/core/kernels:relu_op", + "@com_google_absl//absl/strings", + ] + MKL_TEST_DEPS, +) diff --git a/tensorflow/core/kernels/mkl/mkl_fused_ops_test.cc b/tensorflow/core/kernels/mkl/mkl_fused_ops_test.cc index 857a25bbe55e7a..6d709794b6fb92 100644 --- a/tensorflow/core/kernels/mkl/mkl_fused_ops_test.cc +++ b/tensorflow/core/kernels/mkl/mkl_fused_ops_test.cc @@ -1098,7 +1098,7 @@ class MklFusedMatMulCacheTest : public OpsTestBase { // Bias vector. AddInputFromArray(TensorShape({4}), {1, 2, 3, 4}); - using KernelType = MklDnnMatMulOpBase; + using KernelType = MklDnnMatMulOpBase; // Before the first time kernel execution, weight should be empty EXPECT_TRUE(static_cast(this->kernel_.get()) ->IsWeightCacheEmpty(this->context_.get())); diff --git a/tensorflow/core/kernels/mkl/mkl_kernel_util.cc b/tensorflow/core/kernels/mkl/mkl_kernel_util.cc index a4731ea7888337..504247ff6613ec 100644 --- a/tensorflow/core/kernels/mkl/mkl_kernel_util.cc +++ b/tensorflow/core/kernels/mkl/mkl_kernel_util.cc @@ -22,6 +22,7 @@ limitations under the License. #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/core/graph/node_builder.h" +#include "tensorflow/core/lib/core/errors.h" namespace tensorflow { @@ -40,13 +41,16 @@ void MklTestingUtil::RunMklQuantizeOp(const Tensor& input, Node* max_node = test::graph::Constant(&*graph, Tensor(max), "max"); Node* quantize_op; + string round_mode = + (mode == "SCALE") ? 
"HALF_TO_EVEN" : "HALF_AWAY_FROM_ZERO"; + TF_CHECK_OK(NodeBuilder("mkl_quantizeV2", "_MklQuantizeV2") .Input(input_node) .Input(min_node) .Input(max_node) .Attr("T", type) .Attr("mode", mode) - .Attr("round_mode", "HALF_TO_EVEN") + .Attr("round_mode", round_mode) .Attr("_kernel", "QuantizedMklOp") .Finalize(&*graph, &quantize_op)); diff --git a/tensorflow/core/kernels/mkl/mkl_kernel_util.h b/tensorflow/core/kernels/mkl/mkl_kernel_util.h index fb9df4d237c74b..da600fb001e038 100644 --- a/tensorflow/core/kernels/mkl/mkl_kernel_util.h +++ b/tensorflow/core/kernels/mkl/mkl_kernel_util.h @@ -49,6 +49,38 @@ class MklTestingUtil { *tensor_min = min(); *tensor_max = max(); } + + // This utility function mimics Quantization of float/bfloat16 tensor with + // oneDNN backend QuantizeV2 operation. Since the op signature requires min + // and max values to be in float type, min_tensor and max_tensor should have + // their dtype set to DT_FLOAT. + template + static Status GetQuantizationTensors(const Tensor& input, Tensor* output, + DataType out_type, const string mode, + Tensor* min_tensor, Tensor* max_tensor) { + if (min_tensor->dtype() != DT_FLOAT || max_tensor->dtype() != DT_FLOAT) { + return absl::UnimplementedError("Tensor must be float32."); + } + T min; + T max; + ComputeMinMax(input, &min, &max); + + float adjusted_min = static_cast(min); + float adjusted_max = static_cast(max); + if (mode == "SCALED") { + if (output->dtype() != DT_QINT8) { + return absl::UnimplementedError("Tensor must be QInt8 in SCALED mode."); + } + float range = std::max(std::abs(adjusted_min), std::abs(adjusted_max)); + adjusted_min = -range; + adjusted_max = range; + } + RunMklQuantizeOp(input, adjusted_min, adjusted_max, out_type, mode, output); + min_tensor->flat()(0) = adjusted_min; + max_tensor->flat()(0) = adjusted_max; + + return OkStatus(); + } }; #ifdef ENABLE_ONEDNN_V3 diff --git a/tensorflow/core/kernels/mkl/mkl_matmul_op_fused.cc b/tensorflow/core/kernels/mkl/mkl_matmul_op_fused.cc index 2d0065a52e5b4a..fb76ab765f1bf9 100644 --- a/tensorflow/core/kernels/mkl/mkl_matmul_op_fused.cc +++ b/tensorflow/core/kernels/mkl/mkl_matmul_op_fused.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,19 +19,29 @@ limitations under the License. // Multiplication (MatMul) with bias (BiasAdd) operations. #if defined(INTEL_MKL) +#include + +#include "oneapi/dnnl/dnnl.hpp" +#include "absl/container/inlined_vector.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/kernels/mkl/mkl_matmul_ops_common.h" +#include "tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h" #include "tensorflow/core/lib/core/errors.h" +#include "tensorflow/core/platform/errors.h" namespace tensorflow { // Fuse Operation -template -class MklFusedMatMulOp : public MklDnnMatMulOpBase { +template +class MklFusedMatMulOp : public MklDnnMatMulOpBase { public: explicit MklFusedMatMulOp(OpKernelConstruction* ctx) - : MklDnnMatMulOpBase(ctx) { + : MklDnnMatMulOpBase(ctx) { + if (std::is_same::value) { + return; // Quantized version will have own contstruction code. 
+ } OP_REQUIRES_OK(ctx, ctx->GetAttr("fused_ops", &fused_ops_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_a", &transpose_a_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_b", &transpose_b_)); @@ -41,7 +51,6 @@ class MklFusedMatMulOp : public MklDnnMatMulOpBase { OP_REQUIRES_OK( ctx, ctx->GetAttr("is_filter_const", &(this->is_weight_const_))); } - OP_REQUIRES(ctx, fused_ops_.size() <= 2, absl::InvalidArgumentError( "MklFusedMatMul must have 2 post-arguments at most.")); @@ -54,7 +63,7 @@ class MklFusedMatMulOp : public MklDnnMatMulOpBase { ctx, transpose_a_ == false, absl::InvalidArgumentError("In[0] of MklMatMul can't be transposed.")); if (fused_ops_.size() == 2 && fused_ops_[1] == "LeakyRelu") { - OP_REQUIRES_OK(ctx, ctx->GetAttr("leakyrelu_alpha", &leakyrelu_alpha)); + OP_REQUIRES_OK(ctx, ctx->GetAttr("leakyrelu_alpha", &leakyrelu_alpha_)); } } @@ -64,7 +73,7 @@ class MklFusedMatMulOp : public MklDnnMatMulOpBase { const Tensor& weight_tensor = ctx->input(this->kInputIndexWeight); const Tensor& bias_tensor = MklGetInput(ctx, this->kInputIndexBias); - if (std::is_same::value) { + if (std::is_same::value) { (void)SetFPMathMode(); } @@ -134,15 +143,16 @@ class MklFusedMatMulOp : public MklDnnMatMulOpBase { memory::format_tag::nc, this->is_weight_const_); // Extend the basic parameters for data types and fusions. ExtendMklDnnMatMulFwdParams(ctx, matmul_params); - auto st = ExecuteSingleThreadedGemm(batch, channel, k, sizeof(T)); + auto st = ExecuteSingleThreadedGemm(batch, channel, k, sizeof(T1)); // Create the oneDNN wrapper over Eigen threadpool and set max threads // in oneDNN. Eigen::ThreadPoolInterface* eigen_interface = EigenThreadPoolFromTfContext(ctx); tsl::OneDnnThreadPool eigen_tp(eigen_interface, ThreadPoolUseCallerThread(), st ? 1 : -1); - MklDnnMatMulFwdPrimitive* matmul_prim = - MklDnnMatMulFwdPrimitiveFactory::Get(matmul_params, 0); + MklDnnMatMulFwdPrimitive* matmul_prim = + MklDnnMatMulFwdPrimitiveFactory::Get( + matmul_params, 0); // Allocate output tensor. Tensor* dst_tensor = nullptr; @@ -158,17 +168,17 @@ class MklFusedMatMulOp : public MklDnnMatMulOpBase { TensorShape output_tf_shape({batch, channel}); if (fuse_add_) { - const Tensor& add_tensor = MklGetInput(ctx, kInputIndex_Add); + const Tensor& add_tensor = MklGetInput(ctx, input_idx_add_); MklDnnShape add_mkl_shape; - GetMklShape(ctx, kInputIndex_Add, &add_mkl_shape, native_format); + GetMklShape(ctx, input_idx_add_, &add_mkl_shape, native_format); // For native format, we need not to set metadata. - if (native_format && ctx->forward_input_to_output_with_shape( - kInputIndex_Add, kOutputIndex_Dst, - output_tf_shape, &dst_tensor)) { + if (native_format && + ctx->forward_input_to_output_with_shape( + input_idx_add_, kOutputIndex_Dst, output_tf_shape, &dst_tensor)) { ; // Need to do nothing for native format } else if (!native_format && ForwardMklTensorInToOutWithMklShape( - ctx, kInputIndex_Add, kOutputIndex_Dst, + ctx, input_idx_add_, kOutputIndex_Dst, &dst_tensor, output_mkl_shape, false)) { ; // If it's not native format, need to forward and set meta first } else { @@ -182,19 +192,20 @@ class MklFusedMatMulOp : public MklDnnMatMulOpBase { auto add_md = add_mkl_shape.IsMklTensor() ? 
add_mkl_shape.GetMklLayout() - : memory::desc(dst_dims, MklDnnType(), output_format_tag); + : memory::desc(dst_dims, MklDnnType(), output_format_tag); auto dst_md = - memory::desc(dst_dims, MklDnnType(), output_format_tag); + memory::desc(dst_dims, MklDnnType(), output_format_tag); void* add_buf = - static_cast(const_cast(add_tensor.flat().data())); - void* dst_buf = static_cast((dst_tensor)->flat().data()); + static_cast(const_cast(add_tensor.flat().data())); + void* dst_buf = + static_cast((dst_tensor)->flat().data()); if (native_format) { // We are simply deep copying the add_tensor to dst_tensor without // changing memory layout, hence using same memory descriptor. add_md = dst_md = - memory::desc({add_tensor.NumElements()}, MklDnnType(), + memory::desc({add_tensor.NumElements()}, MklDnnType(), dnnl::memory::format_tag::x); } @@ -218,31 +229,33 @@ class MklFusedMatMulOp : public MklDnnMatMulOpBase { try { // Prepare the input and output for primitive. - T* src_data = const_cast(src_tensor.flat().data()); - T* weight_data = const_cast(weight_tensor.flat().data()); - T* bias_data = const_cast(bias_tensor.flat().data()); - T* dst_data = const_cast(dst_tensor->flat().data()); + T1* src_data = const_cast(src_tensor.flat().data()); + T2* weight_data = const_cast(weight_tensor.flat().data()); + void* bias_data = static_cast( + const_cast(bias_tensor.flat().data())); + Toutput* dst_data = + const_cast(dst_tensor->flat().data()); // Reorder input if necessary. - MklDnnData src_mkl(&(this->cpu_engine_)); - MklDnnData weight_mkl(&(this->cpu_engine_)); + MklDnnData src_mkl(&(this->cpu_engine_)); + MklDnnData weight_mkl(&(this->cpu_engine_)); auto src_md = src_mkl_shape.IsMklTensor() ? src_mkl_shape.GetMklLayout() - : memory::desc(src_dims, MklDnnType(), src_format); + : memory::desc(src_dims, MklDnnType(), src_format); if (src_md != matmul_pd->src_desc()) { src_mkl.SetUsrMem(src_md, src_data); src_mkl.CheckReorderToOpMem(matmul_pd.get()->src_desc(), this->cpu_engine_, ctx); - src_data = reinterpret_cast(src_mkl.GetOpMem().get_data_handle()); + src_data = static_cast(src_mkl.GetOpMem().get_data_handle()); } // Get cached data when weight is const. const memory::desc weight_md = - memory::desc(weight_dims, MklDnnType(), weight_format); + memory::desc(weight_dims, MklDnnType(), weight_format); if (weight_md != matmul_pd->weights_desc()) { - T* cached_weight_data = nullptr; + T2* cached_weight_data = nullptr; if (this->is_weight_const_) { // TODO(intel-tf): When oneDNN major version changes to v4.x, weight @@ -268,16 +281,22 @@ class MklFusedMatMulOp : public MklDnnMatMulOpBase { weight_mkl.CheckReorderToOpMem(matmul_pd.get()->weights_desc(), this->cpu_engine_, ctx); weight_data = - reinterpret_cast(weight_mkl.GetOpMem().get_data_handle()); + static_cast(weight_mkl.GetOpMem().get_data_handle()); } } std::shared_ptr cpu_stream; - cpu_stream.reset(CreateStream(&eigen_tp, matmul_prim->GetEngine())); UserScratchPad scratch_pad; scratch_pad.AllocateSPTensor(matmul_prim, ctx); + // Temporary tensor for scaled bias when op is quantized version. + Tensor temp_scaled_bias_tensor; + if (std::is_same::value) { + this->GetScaledBias(ctx, matmul_pd, bias_tensor, + &temp_scaled_bias_tensor, &bias_data); + } + // Execute fused matmul op. 
matmul_prim->Execute(src_data, weight_data, bias_data, dst_data, matmul_params, scratch_pad.Get(), cpu_stream); @@ -290,30 +309,31 @@ class MklFusedMatMulOp : public MklDnnMatMulOpBase { } } - void ExtendMklDnnMatMulFwdParams(OpKernelContext* ctx, - MklDnnMatMulFwdParams& params) { + virtual void ExtendMklDnnMatMulFwdParams(OpKernelContext* ctx, + MklDnnMatMulFwdParams& params) { + // Create a string from data types of input, weight, bias, and output. + params.dtypes.append(typeid(T1).name()); + params.dtypes.append(typeid(T2).name()); + params.dtypes.append(typeid(Tbias).name()); + params.dtypes.append(typeid(Toutput).name()); if (fused_ops_.size() == 2) { string post_op = fused_ops_[1]; - - if (post_op == "Relu") { - params.post_op_params.push_back({"relu", {1.0, 0.0, 0.0}}); - } else if (post_op == "Relu6") { - params.post_op_params.push_back({"relu6", {1.0, 6.0, 0.0}}); + float scale = 1.0f; + float alpha = 0.0f; + float beta = 0.0f; + if (post_op == "Relu6") { + alpha = 6.0f; + } else if (post_op == "LeakyRelu") { + alpha = leakyrelu_alpha_; } else if (post_op == "Elu") { - params.post_op_params.push_back({"elu", {1.0, 1.0, 0.0}}); - } else if (post_op == "GeluApproximate") { - params.post_op_params.push_back({"gelu_approximate", {1.0, 1.0, 0.0}}); - } else if (post_op == "GeluExact") { - params.post_op_params.push_back({"gelu_exact", {1.0, 1.0, 0.0}}); - } else if (post_op == "Tanh") { - params.post_op_params.push_back({"tanh", {1.0, 0.0, 0.0}}); + alpha = 1.0f; + } + if (post_op == "Relu" || post_op == "Relu6" || post_op == "LeakyRelu" || + post_op == "Elu" || post_op == "GeluApproximate" || + post_op == "GeluExact" || post_op == "Tanh" || post_op == "Sigmoid") { + params.post_op_params.push_back({post_op, {scale, alpha, beta}}); } else if (post_op == "Add") { params.post_op_params.push_back({"sum", {1.0}}); - } else if (post_op == "LeakyRelu") { - params.post_op_params.push_back( - {"leakyrelu", {1.0, leakyrelu_alpha, 0.0}}); - } else if (post_op == "Sigmoid") { - params.post_op_params.push_back({"logistic", {1.0, 0.0, 0.0}}); } else { OP_REQUIRES_OK(ctx, absl::InvalidArgumentError(absl::StrCat( "Unsupported post-argument in MklFusedMatMul: ", @@ -322,34 +342,630 @@ class MklFusedMatMulOp : public MklDnnMatMulOpBase { } } - private: + protected: + virtual void GetScaledBias( + OpKernelContext*, + std::shared_ptr&, + const Tensor&, Tensor*, void**) {} + bool fuse_add_ = false; bool transpose_a_; bool transpose_b_; - float leakyrelu_alpha = 0.2; + float leakyrelu_alpha_ = 0.2; std::vector fused_ops_; - const int kInputIndex_Add = 3; + int input_idx_add_ = 3; const int kOutputIndex_Dst = 0; -}; // namespace tensorflow +#ifdef DNNL_AARCH64_USE_ACL + const int kWeightTensorHashLength = 1024; +#endif +}; + +namespace { + +enum class FusedComputationType { + kUndefined, + kBiasAdd, + kBiasAdd_Dequantize, + kBiasAdd_Requantize, + kBiasAdd_Activation, + kBiasAdd_Activation_Dequantize, + kBiasAdd_Activation_Requantize, + kBiasAdd_Add, + kBiasAdd_Add_Dequantize, + kBiasAdd_Add_Requantize, +}; + +struct FusedComputationPattern { + FusedComputationType fused_computation; + std::vector fused_ops; +}; + +} // namespace + +// OneDNN uses post-ops to implement different kind of fusions. The category of +// each individual post-op can be inferred from the fused_ops attribute. The +// following enum is used to identify list of required post-ops. 
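// Illustrative aside (not part of this patch): a minimal sketch, assuming
// oneDNN v3, of how a fused_ops pattern such as
// {"BiasAdd", "Activation", "Requantize"} is ultimately expressed through
// oneDNN attributes. The real plumbing lives in MklDnnMatMulFwdPrimitive
// below; the eltwise/scale APIs differ between oneDNN v2.x and v3.x, which is
// why that code hides them behind the APPEND_ELTWISE macro and the
// ENABLE_ONEDNN_V3 guards.
//
//   dnnl::post_ops ops;
//   // kActivation -> an eltwise post-op (Relu shown here).
//   ops.append_eltwise(dnnl::algorithm::eltwise_relu, /*alpha=*/0.f, /*beta=*/0.f);
//   dnnl::primitive_attr attr;
//   attr.set_post_ops(ops);
//   // kOutputScale -> per-tensor (mask 0) or per-channel scales on the inputs.
//   attr.set_scales_mask(DNNL_ARG_SRC, /*mask=*/0);
//   attr.set_scales_mask(DNNL_ARG_WEIGHTS, /*mask=*/0);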
+enum class PostOpKind { kActivation, kSum, kOutputScale, kLinear }; + +template +class QuantizedFusedMatMulOp + : public MklFusedMatMulOp { + protected: + string input_quant_mode_; // 0-th input + string output_quant_mode_; // 0-th output + string activation_type_; // Activation op type + + // Initialize minmax tensor indices with default values for the most common + // cases. + int input_min_idx_ = 3; + int input_max_idx_ = 4; + int weight_min_idx_ = 5; + int weight_max_idx_ = 6; + + struct PostOpInfo { + PostOpKind post_op_kind; + struct OperandInfo { + int idx = -1; // Operand tensor index if needed by a post-op. + // Indices of min and max value tensors, if the operand is quantized. + absl::InlinedVector min_max_indices; + } operand_info; + // Indices of output min and max value tensors. It is used when requantize + // is fused. + absl::InlinedVector min_max_indices; + }; + + absl::InlinedVector post_op_info_list_; + + void Initialize(OpKernelConstruction* context) { + OP_REQUIRES_OK(context, + context->GetAttr("transpose_a", &this->transpose_a_)); + OP_REQUIRES_OK(context, + context->GetAttr("transpose_b", &this->transpose_b_)); + OP_REQUIRES_OK(context, + context->GetAttr("input_quant_mode", &input_quant_mode_)); + OP_REQUIRES_OK(context, + context->GetAttr("output_quant_mode", &output_quant_mode_)); + OP_REQUIRES_OK( + context, context->GetAttr("is_weight_const", &this->is_weight_const_)); + OP_REQUIRES_OK(context, + context->GetAttr("is_bias_const", &this->is_bias_const_)); + if (context->HasAttr("leakyrelu_alpha")) { + OP_REQUIRES_OK(context, context->GetAttr("leakyrelu_alpha", + &this->leakyrelu_alpha_)); + } + + // Extract activation info and canonicalize activation types to + // common name "Activation" in the fused_ops attribute. + std::vector fused_ops; + OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops)); + for (auto it = fused_ops.begin(); it != fused_ops.end(); ++it) { + if (*it == "Relu" || *it == "Relu6" || *it == "Elu" || + *it == "GeluApproximate" || *it == "GeluExact" || *it == "Tanh" || + *it == "LeakyRelu" || *it == "Sigmoid") { + if (*it != "Relu") { + string last_fusion = fused_ops.back(); + OP_REQUIRES( + context, + (last_fusion == "Dequantize" || last_fusion == "Requantize"), + absl::UnimplementedError(absl::StrCat( + "Nonlinear activation except Relu can be ", + "supported only with Dequantize or Requantize fusion."))); + } + activation_type_ = *it; + // Canonicalize all activation types into "Activation" for simplifying + // post ops construction. + *it = "Activation"; + } + } + + using FCT = FusedComputationType; + + // TODO(intel-tf): Add more patterns when implemented. + std::vector patterns{ + {FCT::kBiasAdd, {"BiasAdd"}}, + {FCT::kBiasAdd_Dequantize, {"BiasAdd", "Dequantize"}}, + {FCT::kBiasAdd_Requantize, {"BiasAdd", "Requantize"}}, + {FCT::kBiasAdd_Activation, {"BiasAdd", "Activation"}}, + {FCT::kBiasAdd_Activation_Dequantize, + {"BiasAdd", "Activation", "Dequantize"}}, + {FCT::kBiasAdd_Activation_Requantize, + {"BiasAdd", "Activation", "Requantize"}}, + {FCT::kBiasAdd_Add_Dequantize, {"BiasAdd", "Add", "Dequantize"}}, + }; + + FusedComputationType fused_computation = FusedComputationType::kUndefined; + for (const auto& pattern : patterns) { + if (fused_ops == pattern.fused_ops) { + fused_computation = pattern.fused_computation; + break; + } + } + + // Configure oneDNN post ops + switch (fused_computation) { + case FCT::kBiasAdd: + // No post op is required. 
+ OP_REQUIRES(context, (std::is_same::value), + absl::UnimplementedError(absl::StrCat( + "Qunatized fusion: [", absl::StrJoin(fused_ops, ","), + "] needs output in qint32."))); + break; + case FCT::kBiasAdd_Dequantize: + post_op_info_list_ = {{PostOpKind::kOutputScale, {}, {}}}; + break; + case FCT::kBiasAdd_Requantize: + post_op_info_list_ = {{PostOpKind::kOutputScale, {}, {}}, + {PostOpKind::kLinear, {}, {7, 8}}}; + break; + case FCT::kBiasAdd_Activation: + OP_REQUIRES(context, + (std::is_same::value && + activation_type_ == "Relu"), + absl::UnimplementedError(absl::StrCat( + "Qunatized fusion: [", absl::StrJoin(fused_ops, ","), + "] needs output in qint32 and ", + "activation supported is only Relu"))); + post_op_info_list_ = {{PostOpKind::kActivation, {}, {}}}; + break; + case FCT::kBiasAdd_Activation_Dequantize: + post_op_info_list_ = {{PostOpKind::kOutputScale, {}, {}}, + {PostOpKind::kActivation, {}, {}}}; + break; + case FCT::kBiasAdd_Activation_Requantize: + post_op_info_list_ = {{PostOpKind::kOutputScale, {}, {}}, + {PostOpKind::kActivation, {}, {}}, + {PostOpKind::kLinear, {}, {7, 8}}}; + break; + case FCT::kBiasAdd_Add_Dequantize: { + OP_REQUIRES( + context, + (std::is_same::value || std::is_same::value), + absl::UnimplementedError( + "Quantized addend tensor is not implemented yet.")); + // Addend tensor precedes all minmax tensors. Shift the indices from + // default initilized values. + input_min_idx_ += 1; + input_max_idx_ += 1; + weight_min_idx_ += 1; + weight_max_idx_ += 1; + post_op_info_list_ = {{PostOpKind::kOutputScale, {}, {}}, + {PostOpKind::kSum, {3, {}}, {}}}; + } break; + default: + OP_REQUIRES(context, false, + absl::UnimplementedError( + absl::StrCat("Fusion is not implemented: [", + absl::StrJoin(fused_ops, ","), "]"))); + } + } + + public: + explicit QuantizedFusedMatMulOp(OpKernelConstruction* context) + : MklFusedMatMulOp(context) { + Initialize(context); + } + + void Compute(OpKernelContext* ctx) override { + MklFusedMatMulOp::Compute(ctx); + // Compute additional outputs + if (std::is_same::value || + std::is_same::value || + std::is_same::value) { + Tensor* min_output = nullptr; + Tensor* max_output = nullptr; + + const float min_input = ctx->input(input_min_idx_).flat()(0); + const float max_input = ctx->input(input_max_idx_).flat()(0); + const Tensor& min_weight = ctx->input(weight_min_idx_); + const Tensor& max_weight = ctx->input(weight_max_idx_); + OP_REQUIRES(ctx, min_weight.shape() == max_weight.shape(), + absl::InvalidArgumentError( + "Shape of min-weight and max-weight must be same.")); + + if (std::is_same::value) { + TensorShape output_minmax_shape = min_weight.shape(); + OP_REQUIRES_OK( + ctx, ctx->allocate_output(1, output_minmax_shape, &min_output)); + OP_REQUIRES_OK( + ctx, ctx->allocate_output(2, output_minmax_shape, &max_output)); + if (min_weight.dims() == 0) { + float min_output_value; + float max_output_value; + MklQuantizationRangeForMultiplication( + min_input, max_input, min_weight.flat()(0), + max_weight.flat()(0), &min_output_value, + &max_output_value); + min_output->flat()(0) = min_output_value; + max_output->flat()(0) = max_output_value; + } else { + MklQuantizationRangeForMultiplication( + min_input, max_input, min_weight, max_weight, &min_output, + &max_output); + } + } else { + // When output type is qint8 or quint8, the kernel is registered for + // Requantize fusion. 
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(1, {}, &min_output)); + OP_REQUIRES_OK(ctx, ctx->allocate_output(2, {}, &max_output)); + int output_min_idx = ctx->num_inputs() - 2; + int output_max_idx = ctx->num_inputs() - 1; + const float requested_min = ctx->input(output_min_idx).flat()(0); + const float requested_max = ctx->input(output_max_idx).flat()(0); + if (output_quant_mode_ == "SCALED") { + const float range_output = + std::max(std::abs(requested_min), std::abs(requested_max)); + if (std::is_same::value) { + min_output->flat()(0) = -range_output; + max_output->flat()(0) = range_output; + } else { + min_output->flat()(0) = 0; + max_output->flat()(0) = range_output; + } + } else { + min_output->flat()(0) = requested_min; + max_output->flat()(0) = requested_max; + } + } + } else if (std::is_same::value || + std::is_same::value) { + // Kernel is registered for Dequantization fusion. Nothing to do. + } else { + OP_REQUIRES_OK(ctx, + absl::InvalidArgumentError("Unsupported output type.")); + } + } + + void ExtendMklDnnMatMulFwdParams(OpKernelContext* ctx, + MklDnnMatMulFwdParams& params) override { + // Create a string from data types of input, weight, bias, and output. + params.dtypes.append(typeid(T1).name()); + params.dtypes.append(typeid(T2).name()); + params.dtypes.append(typeid(Tbias).name()); + params.dtypes.append(typeid(Toutput).name()); + + params.input_quant_mode = input_quant_mode_; + + for (const auto& post_op_info : post_op_info_list_) { + auto post_op_kind = post_op_info.post_op_kind; + switch (post_op_kind) { + case PostOpKind::kOutputScale: { + if constexpr (std::is_same::value) { + // No scaling is required. + break; + } + const float min_input = ctx->input(input_min_idx_).flat()(0); + const float max_input = ctx->input(input_max_idx_).flat()(0); + const Tensor& min_weight_tensor = ctx->input(weight_min_idx_); + const Tensor& max_weight_tensor = ctx->input(weight_max_idx_); + const float* min_weight = min_weight_tensor.flat().data(); + const float* max_weight = max_weight_tensor.flat().data(); + const size_t num_weight_scales = min_weight_tensor.NumElements(); + + const float max_int8_input = + (std::is_same::value) ? 255.0f : 127.0f; + const float max_int8_weight = + (std::is_same::value) ? 255.0f : 127.0f; + const float range_input = + (input_quant_mode_ == "MIN_FIRST") + ? 
max_input - min_input + : std::max(std::abs(min_input), std::abs(max_input)); + + const float src_scale = range_input / max_int8_input; + std::vector wei_scales(num_weight_scales); +#ifndef ENABLE_ONEDNN_V3 + std::vector output_scales(num_weight_scales); +#endif // ENABLE_ONEDNN_V3 + for (size_t i = 0; i < num_weight_scales; ++i) { + float range_weight = + std::max(std::abs(min_weight[i]), std::abs(max_weight[i])); + wei_scales[i] = range_weight / max_int8_weight; +#ifndef ENABLE_ONEDNN_V3 + output_scales[i] = src_scale * wei_scales[i]; +#endif // ENABLE_ONEDNN_V3 + } + FactoryKeyCreator src_partial_key; + src_partial_key.AddAsKey(min_input); + src_partial_key.AddAsKey(max_input); + + FactoryKeyCreator wei_partial_key; + wei_partial_key.AddAsKey(min_weight); + wei_partial_key.AddAsKey(max_weight); +#ifndef ENABLE_ONEDNN_V3 + FactoryKeyCreator output_scales_partial_key; + output_scales_partial_key.AddAsKey(src_partial_key.GetKey()); + output_scales_partial_key.AddAsKey(wei_partial_key.GetKey()); + params.post_op_params.push_back({"output_scale", output_scales, + output_scales_partial_key.GetKey()}); +#else + params.post_op_params.push_back( + {"src_scale", {src_scale}, src_partial_key.GetKey()}); + params.post_op_params.push_back( + {"wei_scale", wei_scales, wei_partial_key.GetKey()}); +#endif // ENABLE_ONEDNN_V3 + } break; + + case PostOpKind::kActivation: { + float scale = 1.0f; + float alpha = 0.0f; + float beta = 0.0f; + if (activation_type_ == "LeakyRelu") + alpha = this->leakyrelu_alpha_; + else if (activation_type_ == "Relu6") + alpha = 6.0f; + else if (activation_type_ == "Elu") + alpha = 1.0f; + params.post_op_params.push_back( + {activation_type_, {scale, alpha, beta}}); + } break; + + case PostOpKind::kLinear: { + // Update output_scale for requantize fusion. + auto output_min_idx = post_op_info.min_max_indices[0]; + auto output_max_idx = post_op_info.min_max_indices[1]; + const float min_output = + ctx->input(output_min_idx).template flat()(0); + const float max_output = + ctx->input(output_max_idx).template flat()(0); + const float max_int8_output = + (std::is_same::value) ? 255.0f : 127.0f; + const float range_output = + (output_quant_mode_ == "MIN_FIRST") + ? max_output - min_output + : std::max(std::abs(min_output), std::abs(max_output)); + float req_scale = max_int8_output / range_output; + float req_shift = 0.0f; + if (output_quant_mode_ == "MIN_FIRST") { + req_shift = -min_output * max_int8_output / range_output; + } + params.post_op_params.push_back( + {"linear", {1.0, req_scale, req_shift}}); + } break; + + case PostOpKind::kSum: { + this->fuse_add_ = true; + this->input_idx_add_ = post_op_info.operand_info.idx; + params.post_op_params.push_back({"sum", {1.0}}); + } break; + + default: + OP_REQUIRES_OK( + ctx, absl::InvalidArgumentError("Unsupported post-op-kind.")); + } + } + } + + void GetScaledBias( + OpKernelContext* ctx, + std::shared_ptr& matmul_pd, + const Tensor& bias_tensor, Tensor* temp_scaled_bias_tensor, + void** bias_data) override { +#ifdef ENABLE_ONEDNN_V3 +#define TSCALED_BIAS float +#else +#define TSCALED_BIAS Tbias +#endif // ENABLE_ONEDNN_V3 + +#ifndef ENABLE_ONEDNN_V3 + if (std::is_same::value) { + // Bias already has been scaled for quantized input and weight. 
+#else + if ((std::is_same::value || + std::is_same::value) && + input_quant_mode_ == "SCALED") { +#endif // !ENABLE_ONEDNN_V3 + return; + } else { + const float min_input = ctx->input(input_min_idx_).flat()(0); + const float max_input = ctx->input(input_max_idx_).flat()(0); + const Tensor& min_weight_tensor = ctx->input(weight_min_idx_); + const Tensor& max_weight_tensor = ctx->input(weight_max_idx_); + const float* min_weight = min_weight_tensor.flat().data(); + const float* max_weight = max_weight_tensor.flat().data(); + bool is_cached_bias_valid = false; + bool is_bias_cache_empty = this->IsBiasCacheEmpty(); + if (!is_bias_cache_empty) { + this->GetCachedBias(min_input, max_input, bias_data); + is_cached_bias_valid = (*bias_data != nullptr); + } + if (!is_cached_bias_valid) { + void* input_bias_buf = static_cast( + const_cast(bias_tensor.flat().data())); + auto scaled_bias_md = matmul_pd->bias_desc(); + TensorShape scaled_bias_shape; + scaled_bias_shape.AddDim((scaled_bias_md.get_size() / sizeof(float))); + OP_REQUIRES_OK(ctx, ctx->allocate_temp( + DataTypeToEnum::v(), + scaled_bias_shape, temp_scaled_bias_tensor)); + void* scaled_bias_buf = static_cast( + temp_scaled_bias_tensor->flat().data()); + + const float max_int8_input = + (std::is_same::value) ? 255.0f : 127.0f; + const float max_int8_weight = + (std::is_same::value) ? 255.0f : 127.0f; + const float range_input = + (input_quant_mode_ == "MIN_FIRST") + ? max_input - min_input + : std::max(std::abs(min_input), std::abs(max_input)); + const size_t num_weight_scales = min_weight_tensor.NumElements(); + std::vector bias_scales(num_weight_scales, 1.0); + for (size_t i = 0; i < num_weight_scales; ++i) { + float range_weight = + std::max(std::abs(min_weight[i]), std::abs(max_weight[i])); + float scale_factor = + (max_int8_input * max_int8_weight) / (range_input * range_weight); + bias_scales[i] = scale_factor; + } + if (input_quant_mode_ == "MIN_FIRST") { + Tbias* input_bias = (Tbias*)input_bias_buf; + TSCALED_BIAS* adjusted_bias = (TSCALED_BIAS*)scaled_bias_buf; + float q_min_input = max_int8_input * min_input / range_input; + const Tensor& weight_tensor = ctx->input(1); + int stride_ic = 1; + int stride_oc = 1; + int k = 0; + int n = 0; + if (this->transpose_b_) { + k = weight_tensor.dim_size(1); + n = weight_tensor.dim_size(0); + stride_ic = 1; + stride_oc = k; + } else { + k = weight_tensor.dim_size(0); + n = weight_tensor.dim_size(1); + stride_ic = n; + stride_oc = 1; + } + T2* weight_buf = const_cast(weight_tensor.flat().data()); + std::vector scales(n); + if (num_weight_scales == 1) { + // Weights are quantized per_tensor. Scales need to be expanded to + // number of output channels. + std::fill(scales.begin(), scales.end(), bias_scales[0]); + } else { + scales = bias_scales; + } + // TODO(intel-tf): Paralellize loop for large weights. + for (int j = 0; j < n; ++j) { + int sum = 0; + for (int i = 0; i < k; ++i) { + sum += weight_buf[i * stride_ic + j * stride_oc]; + } +#ifndef ENABLE_ONEDNN_V3 + adjusted_bias[j] = static_cast( + (static_cast(input_bias[j]) * scales[j]) + + (sum * q_min_input)); +#else + // TODO(intel-tf): Use zeropoint for quantized input tensor instead + // of manual adjustments. + if (std::is_same::value) { + // Starting with oneDNN v3.0, bias is expected to be dequantized + // to float32. + adjusted_bias[j] = static_cast(input_bias[j]) / scales[j]; + } else { + // Bias is float32 or bfloat16 but still needs to be compensated. 
+ adjusted_bias[j] = static_cast(input_bias[j]) + + ((sum * q_min_input) / scales[j]); + } +#endif // !ENABLE_ONEDNN_V3 + } + } else { + memory::dims input_bias_dims = + memory::dims({bias_tensor.shape().dim_size(0)}); + auto input_bias_md = dnnl::memory::desc( + input_bias_dims, MklDnnType(), memory::format_tag::x); + auto input_bias_mem = + dnnl::memory(input_bias_md, this->cpu_engine_, input_bias_buf); + auto scaled_bias_mem = + dnnl::memory(scaled_bias_md, this->cpu_engine_, scaled_bias_buf); + dnnl::primitive_attr bias_attr; +#ifndef ENABLE_ONEDNN_V3 + (num_weight_scales == 1) + ? bias_attr.set_output_scales(0, bias_scales) + : bias_attr.set_output_scales(1, bias_scales); +#else + (num_weight_scales == 1) ? bias_attr.set_scales_mask(DNNL_ARG_SRC, 0) + : bias_attr.set_scales_mask(DNNL_ARG_SRC, 1); +#endif // !ENABLE_ONEDNN_V3 + auto reorder_prim = + dnnl::reorder(input_bias_mem, scaled_bias_mem, bias_attr); + std::unordered_map reorder_net_args = { + {DNNL_ARG_FROM, input_bias_mem}, {DNNL_ARG_TO, scaled_bias_mem}}; +#ifdef ENABLE_ONEDNN_V3 + auto scale_mem = + memory({{1}, MklDnnType(), memory::format_tag::x}, + this->cpu_engine_, bias_scales.data()); + reorder_net_args.insert( + {DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, scale_mem}); +#endif // ENABLE_ONEDNN_V3 + reorder_prim.execute(dnnl::stream(this->cpu_engine_), + reorder_net_args); + } + + *bias_data = temp_scaled_bias_tensor->flat().data(); + + if (is_bias_cache_empty) { + // Only try to cache the bias in the first iteration. + this->CacheBias(ctx, *temp_scaled_bias_tensor, min_input, max_input); + } + } + } + } + + bool IsCachedBiasValid(float current_min_input, + float current_max_input) override + TF_LOCKS_EXCLUDED(this->bias_cache_mutex_) { + tf_shared_lock lock(this->bias_cache_mutex_); + if (this->is_bias_const_ && this->is_weight_const_ && + std::abs(current_min_input - this->saved_min_input_) < 1e-5 && + std::abs(current_max_input - this->saved_max_input_) < 1e-5) { + return true; + } + return false; + } +}; // Register mkl kernels for supported operations and types. 
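// Illustrative aside (not part of this patch): a worked example, with assumed
// numbers, of why GetScaledBias() above adds a `sum * q_min_input`
// compensation term in MIN_FIRST mode. MIN_FIRST maps a float input
// a in [min_a, max_a] to a_q = (a - min_a) * 255 / range_a, so every quantized
// value carries an implicit offset of min_a. The int32 accumulator holds
// sum_i a_q[i] * w_q[i][j]; recovering the true dot product therefore requires
// folding min_a * sum_i w_q[i][j], rescaled to accumulator units, into the
// bias.
//
//   float min_a = -1.0f, max_a = 3.0f;            // assumed input range
//   float range_a = max_a - min_a;                 // 4.0
//   float q_min_input = 255.0f * min_a / range_a;  // -63.75, as in the kernel
//   // For a weight column whose int8 values sum to w_sum, the bias gets
//   // w_sum * q_min_input added (oneDNN v2 path), or that same value divided
//   // by the per-channel output scale (oneDNN v3 path, bias kept as float).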
-#define REGISTER_FUSEDMATMUL_MKL_SUPPORTED_KERNELS_TYPES(type) \ - REGISTER_KERNEL_BUILDER( \ - Name("_MklFusedMatMul") \ - .Device(DEVICE_CPU) \ - .TypeConstraint("T") \ - .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ - MklFusedMatMulOp); \ - REGISTER_KERNEL_BUILDER(Name("_MklNativeFusedMatMul") \ - .Device(DEVICE_CPU) \ - .TypeConstraint("T") \ - .Label(mkl_op_registry::kMklNameChangeOpLabel), \ - MklFusedMatMulOp); +#define REGISTER_FUSEDMATMUL_MKL_SUPPORTED_KERNELS_TYPES(type) \ + REGISTER_KERNEL_BUILDER( \ + Name("_MklFusedMatMul") \ + .Device(DEVICE_CPU) \ + .TypeConstraint("T") \ + .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ + MklFusedMatMulOp); \ + REGISTER_KERNEL_BUILDER( \ + Name("_MklNativeFusedMatMul") \ + .Device(DEVICE_CPU) \ + .TypeConstraint("T") \ + .Label(mkl_op_registry::kMklNameChangeOpLabel), \ + MklFusedMatMulOp); TF_CALL_float(REGISTER_FUSEDMATMUL_MKL_SUPPORTED_KERNELS_TYPES); TF_CALL_bfloat16(REGISTER_FUSEDMATMUL_MKL_SUPPORTED_KERNELS_TYPES); TF_CALL_half(REGISTER_FUSEDMATMUL_MKL_SUPPORTED_KERNELS_TYPES); #undef REGISTER_FUSEDMATMUL_MKL_SUPPORTED_KERNELS_TYPES +#define REGISTER_QUANTIZED_MATMUL(input_type, weight_type, bias_type, \ + output_type, additional_type) \ + REGISTER_KERNEL_BUILDER( \ + Name("_QuantizedMatMul") \ + .Device(DEVICE_CPU) \ + .TypeConstraint("T1") \ + .TypeConstraint("T2") \ + .TypeConstraint("Tbias") \ + .TypeConstraint("Tout") \ + .TypeConstraint("U"), \ + QuantizedFusedMatMulOp); + +#define REGISTER_ALL_OUTPUT_TYPES(input_type, weight_type, bias_type, \ + additional_type) \ + REGISTER_QUANTIZED_MATMUL(input_type, weight_type, bias_type, qint8, \ + additional_type) \ + REGISTER_QUANTIZED_MATMUL(input_type, weight_type, bias_type, quint8, \ + additional_type) \ + REGISTER_QUANTIZED_MATMUL(input_type, weight_type, bias_type, qint32, \ + additional_type) \ + REGISTER_QUANTIZED_MATMUL(input_type, weight_type, bias_type, float, \ + additional_type) \ + REGISTER_QUANTIZED_MATMUL(input_type, weight_type, bias_type, bfloat16, \ + additional_type) + +#define REGISTER_ALL_BIAS_OUTPUT_TYPES(input_type, weight_type, \ + additional_type) \ + REGISTER_ALL_OUTPUT_TYPES(input_type, weight_type, float, additional_type) \ + REGISTER_ALL_OUTPUT_TYPES(input_type, weight_type, bfloat16, \ + additional_type) \ + REGISTER_ALL_OUTPUT_TYPES(input_type, weight_type, qint32, additional_type) + +#define REGISTER_ALL_INPUT_BIAS_OUTPUT_TYPES(weight_type, additional_type) \ + REGISTER_ALL_BIAS_OUTPUT_TYPES(qint8, weight_type, additional_type) \ + REGISTER_ALL_BIAS_OUTPUT_TYPES(quint8, weight_type, additional_type) + +REGISTER_ALL_INPUT_BIAS_OUTPUT_TYPES(qint8, float); +REGISTER_ALL_INPUT_BIAS_OUTPUT_TYPES(qint8, bfloat16); + } // namespace tensorflow #endif // INTEL_MKL diff --git a/tensorflow/core/kernels/mkl/mkl_matmul_ops_common.h b/tensorflow/core/kernels/mkl/mkl_matmul_ops_common.h index 922e6464663bb5..8af21582ee9652 100644 --- a/tensorflow/core/kernels/mkl/mkl_matmul_ops_common.h +++ b/tensorflow/core/kernels/mkl/mkl_matmul_ops_common.h @@ -102,8 +102,10 @@ struct MklDnnMatMulFwdParams { struct PostOpParam { string name; std::vector param; + string partial_key; }; std::vector post_op_params; + string input_quant_mode; MklDnnMatMulFwdParams( memory::dims src_dims, memory::dims weight_dims, memory::dims bias_dims, @@ -244,7 +246,7 @@ class MklDnnMatMulFwdPrimitive : public MklPrimitive { dst_scale_mem(nullptr), #ifndef ENABLE_ONEDNN_V3 fwd_desc(nullptr), -#endif // !ENABLE_ONEDNN_V3 +#endif // ENABLE_ONEDNN_V3 fwd_pd(nullptr), src_md(nullptr), 
weight_md(nullptr), @@ -276,15 +278,26 @@ class MklDnnMatMulFwdPrimitive : public MklPrimitive { MklDnnType(), matmul_fwd_params.dst_format)); - if (std::is_same::value) { - context_.bias_md.reset(new memory::desc({matmul_fwd_params.bias_dims}, - MklDnnType(), - memory::format_tag::any)); + memory::data_type bias_dt; +#ifndef ENABLE_ONEDNN_V3 + bias_dt = MklDnnType(); +#else + if (std::is_same::value) { + // For QuantizedMatMul, bias needs to be passed to oneDNN as float of + // bfloat16 (even if Tbias is qint32). + if (std::is_same::value && + matmul_fwd_params.input_quant_mode == "SCALED") { + bias_dt = MklDnnType(); + } else { + bias_dt = MklDnnType(); + } } else { - context_.bias_md.reset(new memory::desc({matmul_fwd_params.bias_dims}, - MklDnnType(), - memory::format_tag::any)); + bias_dt = MklDnnType(); } +#endif // !ENABLE_ONEDNN_V3 + context_.bias_md.reset(new memory::desc({matmul_fwd_params.bias_dims}, + bias_dt, memory::format_tag::any)); + // Create an inner-product. #ifndef ENABLE_ONEDNN_V3 context_.fwd_desc.reset(new inner_product_forward::desc( @@ -304,60 +317,68 @@ class MklDnnMatMulFwdPrimitive : public MklPrimitive { std::unordered_map is_scale_set; if (!post_op_params.empty()) { for (auto const& post_op_param : post_op_params) { - if (post_op_param.name == "relu" || post_op_param.name == "leakyrelu") { + if (post_op_param.name == "Relu" || post_op_param.name == "LeakyRelu") { DCHECK_EQ(post_op_param.param.size(), 3); float op_scale = post_op_param.param[0]; float op_alpha = post_op_param.param[1]; float op_beta = post_op_param.param[2]; post_ops.APPEND_ELTWISE(op_scale, dnnl::algorithm::eltwise_relu, op_alpha, op_beta); - } else if (post_op_param.name == "relu6") { + } else if (post_op_param.name == "Relu6") { DCHECK_EQ(post_op_param.param.size(), 3); float op_scale = post_op_param.param[0]; float op_alpha = post_op_param.param[1]; float op_beta = post_op_param.param[2]; post_ops.APPEND_ELTWISE_RELU6(op_scale, op_alpha, op_beta); - } else if (post_op_param.name == "elu") { + } else if (post_op_param.name == "Elu") { DCHECK_EQ(post_op_param.param.size(), 3); float op_scale = post_op_param.param[0]; float op_alpha = post_op_param.param[1]; float op_beta = post_op_param.param[2]; post_ops.APPEND_ELTWISE(op_scale, dnnl::algorithm::eltwise_elu, op_alpha, op_beta); - } else if (post_op_param.name == "gelu_approximate") { + } else if (post_op_param.name == "GeluApproximate") { DCHECK_EQ(post_op_param.param.size(), 3); float op_scale = post_op_param.param[0]; float op_alpha = post_op_param.param[1]; float op_beta = post_op_param.param[2]; post_ops.APPEND_ELTWISE(op_scale, dnnl::algorithm::eltwise_gelu_tanh, op_alpha, op_beta); - } else if (post_op_param.name == "gelu_exact") { + } else if (post_op_param.name == "GeluExact") { DCHECK_EQ(post_op_param.param.size(), 3); float op_scale = post_op_param.param[0]; float op_alpha = post_op_param.param[1]; float op_beta = post_op_param.param[2]; post_ops.APPEND_ELTWISE(op_scale, dnnl::algorithm::eltwise_gelu_erf, op_alpha, op_beta); - } else if (post_op_param.name == "tanh") { + } else if (post_op_param.name == "Tanh") { DCHECK_EQ(post_op_param.param.size(), 3); float op_scale = post_op_param.param[0]; float op_alpha = post_op_param.param[1]; float op_beta = post_op_param.param[2]; post_ops.APPEND_ELTWISE(op_scale, dnnl::algorithm::eltwise_tanh, op_alpha, op_beta); - } else if (post_op_param.name == "logistic") { + } else if (post_op_param.name == "Sigmoid") { DCHECK_EQ(post_op_param.param.size(), 3); float op_scale = 
post_op_param.param[0]; float op_alpha = post_op_param.param[1]; float op_beta = post_op_param.param[2]; post_ops.APPEND_ELTWISE(op_scale, dnnl::algorithm::eltwise_logistic, op_alpha, op_beta); + } else if (post_op_param.name == "linear") { + DCHECK_EQ(post_op_param.param.size(), 3); + float op_scale = post_op_param.param[0]; + float op_alpha = post_op_param.param[1]; + float op_beta = post_op_param.param[2]; + post_ops.APPEND_ELTWISE(op_scale, dnnl::algorithm::eltwise_linear, + op_alpha, op_beta); #ifndef ENABLE_ONEDNN_V3 } else if (post_op_param.name == "output_scale") { - DCHECK_EQ(post_op_param.param.size(), 1); - std::vector scales; - scales.push_back(post_op_param.param[0]); - post_ops_attr.set_output_scales(0, scales); + if (post_op_param.param.size() == 1) { + post_ops_attr.set_output_scales(0, post_op_param.param); + } else { + post_ops_attr.set_output_scales(2, post_op_param.param); + } #else } else if (post_op_param.name == "src_scale") { is_scale_set.insert({"src", true}); @@ -368,14 +389,18 @@ class MklDnnMatMulFwdPrimitive : public MklPrimitive { new memory(*context_.src_scale_md, cpu_engine_, DummyData)); } else if (post_op_param.name == "wei_scale") { is_scale_set.insert({"wei", true}); - post_ops_attr.set_scales_mask(DNNL_ARG_WEIGHTS, 0); - context_.wei_scale_md.reset(new memory::desc({1}, MklDnnType(), - memory::format_tag::x)); + const int scale_size = post_op_param.param.size(); + const int mask = scale_size == 1 ? 0 : 1; + post_ops_attr.set_scales_mask(DNNL_ARG_WEIGHTS, mask); + context_.wei_scale_md.reset(new memory::desc( + {scale_size}, MklDnnType(), memory::format_tag::x)); context_.wei_scale_mem.reset( new memory(*context_.wei_scale_md, cpu_engine_, DummyData)); } else if (post_op_param.name == "dst_scale") { is_scale_set.insert({"dst", true}); - post_ops_attr.set_scales_mask(DNNL_ARG_DST, 0); + const int scale_size = post_op_param.param.size(); + const int mask = scale_size == 1 ? 
0 : 1; + post_ops_attr.set_scales_mask(DNNL_ARG_DST, mask); context_.dst_scale_md.reset(new memory::desc({1}, MklDnnType(), memory::format_tag::x)); context_.dst_scale_mem.reset( @@ -387,13 +412,15 @@ class MklDnnMatMulFwdPrimitive : public MklPrimitive { post_ops.append_sum(op_scale); } else { - DCHECK((post_op_param.name == "relu") || - (post_op_param.name == "relu6") || - (post_op_param.name == "elu") || - (post_op_param.name == "tanh") || - (post_op_param.name == "logistic") || + DCHECK((post_op_param.name == "Relu") || + (post_op_param.name == "Relu6") || + (post_op_param.name == "Elu") || + (post_op_param.name == "GeluApproximate") || + (post_op_param.name == "GeluExact") || + (post_op_param.name == "Tanh") || + (post_op_param.name == "Sigmoid") || (post_op_param.name == "sum") || - (post_op_param.name == "leakyrelu") || OUTPUT_SCALE_DCHECK); + (post_op_param.name == "Leakyrelu") || OUTPUT_SCALE_DCHECK); } } post_ops_attr.set_post_ops(post_ops); @@ -433,11 +460,15 @@ class MklDnnMatMulFwdPrimitive : public MklPrimitive { {DNNL_ARG_SCRATCHPAD, *context_.sp_mem}, {DNNL_ARG_DST, *context_.dst_mem}}; #ifdef ENABLE_ONEDNN_V3 - if (is_scale_set["src"] && is_scale_set["wei"] && is_scale_set["dst"]) { + if (is_scale_set["src"]) { net_args.insert( {DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.src_scale_mem}); + } + if (is_scale_set["wei"]) { net_args.insert( {DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS, *context_.wei_scale_mem}); + } + if (is_scale_set["dst"]) { net_args.insert( {DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST, *context_.dst_scale_mem}); } @@ -510,12 +541,12 @@ class MklDnnMatMulFwdPrimitiveFactory : public MklPrimitiveFactory { // Generate keys for post-ops for (auto const& post_op_param : mkldnn_matmul_fwd_dims.post_op_params) { - if (post_op_param.name == "relu" || post_op_param.name == "relu6" || - post_op_param.name == "elu" || post_op_param.name == "tanh" || - post_op_param.name == "logistic" || - post_op_param.name == "leakyrelu" || - post_op_param.name == "gelu_approximate" || - post_op_param.name == "gelu_exact") { + if (post_op_param.name == "Relu" || post_op_param.name == "Relu6" || + post_op_param.name == "Elu" || post_op_param.name == "Tanh" || + post_op_param.name == "Sigmoid" || + post_op_param.name == "LeakyRelu" || + post_op_param.name == "GeluApproximate" || + post_op_param.name == "GeluExact" || post_op_param.name == "linear") { DCHECK_EQ(post_op_param.param.size(), 3); key_creator.AddAsKey(post_op_param.name); key_creator.AddAsKey(post_op_param.param[0]); @@ -532,9 +563,16 @@ class MklDnnMatMulFwdPrimitiveFactory : public MklPrimitiveFactory { post_op_param.name == "wei_scale" || post_op_param.name == "dst_scale") { #endif // !ENABLE_ONEDNN_V3 - DCHECK_EQ(post_op_param.param.size(), 1); key_creator.AddAsKey(post_op_param.name); - key_creator.AddAsKey(post_op_param.param[0]); + if (post_op_param.partial_key.empty()) { + DCHECK_GE(post_op_param.param.size(), 1); + // Old Quantized MatMul kernels do not create part of key beforehand + // as primitive caching-key-creation optimization. + key_creator.AddAsKey(post_op_param.param[0]); + } else { + // New Quantized MatMul kernels pre-create partial key. 
+ key_creator.AddAsKey(post_op_param.partial_key); + } } else { return string("not_a_key"); } diff --git a/tensorflow/core/kernels/mkl/mkl_qmatmul_op.cc b/tensorflow/core/kernels/mkl/mkl_qmatmul_op.cc index efb33375d1669d..92a21e1255c778 100644 --- a/tensorflow/core/kernels/mkl/mkl_qmatmul_op.cc +++ b/tensorflow/core/kernels/mkl/mkl_qmatmul_op.cc @@ -783,7 +783,7 @@ class MklDnnQuantizedMatMulReluOp MklDnnQuantizedMatMulOp::ExtendMklDnnMatMulFwdParams(context, params); - params.post_op_params.push_back({"relu", {1.0, 0.0, 0.0}}); + params.post_op_params.push_back({"Relu", {1.0, 0.0, 0.0}}); } }; diff --git a/tensorflow/core/kernels/mkl/mkl_qmatmul_op_test.cc b/tensorflow/core/kernels/mkl/mkl_qmatmul_op_test.cc index 22b56e19e3bb63..3d862e0b8e8fd5 100644 --- a/tensorflow/core/kernels/mkl/mkl_qmatmul_op_test.cc +++ b/tensorflow/core/kernels/mkl/mkl_qmatmul_op_test.cc @@ -35,23 +35,43 @@ limitations under the License. namespace tensorflow { -class QuantizedMatMulTest : public OpsTestBase {}; +class QuantizedMatMulTest : public OpsTestBase, + public ::testing::WithParamInterface {}; // Two small matrices A of type uint8 and B of type int8 are multiplied // and the result is added with int32 bias -TEST_F(QuantizedMatMulTest, Small_withBias) { - TF_ASSERT_OK( - NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias") - .Input(FakeInput(DT_QUINT8)) - .Input(FakeInput(DT_QINT8)) - .Input(FakeInput(DT_QINT32)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Attr("Toutput", DataTypeToEnum::v()) - .Attr("_kernel", "QuantizedMklOp") - .Finalize(node_def())); +TEST_P(QuantizedMatMulTest, Small_withBias) { + const bool is_old_api = GetParam(); + if (is_old_api) { + TF_ASSERT_OK( + NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias") + .Input(FakeInput(DT_QUINT8)) + .Input(FakeInput(DT_QINT8)) + .Input(FakeInput(DT_QINT32)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Attr("Toutput", DataTypeToEnum::v()) + .Attr("_kernel", "QuantizedMklOp") + .Finalize(node_def())); + } else { + TF_ASSERT_OK( + NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul") + .Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT, + DT_FLOAT, DT_FLOAT, DT_FLOAT}) + .Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT}) + .Attr("Tdevice_inputs", std::vector()) + .Attr("Tdevice_outputs", std::vector()) + .Attr("T1", DT_QUINT8) + .Attr("T2", DT_QINT8) + .Attr("Tbias", DT_QINT32) + .Attr("Tout", DT_QINT32) + .Attr("fused_ops", {"BiasAdd"}) + .Input(FakeInput()) + .Input(FakeInput()) + .Finalize(node_def())); + } TF_ASSERT_OK(InitOp()); // A matrix is: // | 1 | 2 | 3 | @@ -91,19 +111,38 @@ TEST_F(QuantizedMatMulTest, Small_withBias) { // Two small matrices A of type uint8 and B of type int8 are multiplied // and the result is added with neg bias as well -TEST_F(QuantizedMatMulTest, Small_withNegBias) { - TF_ASSERT_OK( - NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias") - .Input(FakeInput(DT_QUINT8)) - .Input(FakeInput(DT_QINT8)) - .Input(FakeInput(DT_QINT32)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Attr("Toutput", DataTypeToEnum::v()) - .Attr("_kernel", "QuantizedMklOp") - .Finalize(node_def())); +TEST_P(QuantizedMatMulTest, Small_withNegBias) { + const bool is_old_api = GetParam(); + if (is_old_api) { + TF_ASSERT_OK( + 
NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias") + .Input(FakeInput(DT_QUINT8)) + .Input(FakeInput(DT_QINT8)) + .Input(FakeInput(DT_QINT32)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Attr("Toutput", DataTypeToEnum::v()) + .Attr("_kernel", "QuantizedMklOp") + .Finalize(node_def())); + } else { + TF_ASSERT_OK( + NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul") + .Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT, + DT_FLOAT, DT_FLOAT, DT_FLOAT}) + .Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT}) + .Attr("Tdevice_inputs", std::vector()) + .Attr("Tdevice_outputs", std::vector()) + .Attr("T1", DT_QUINT8) + .Attr("T2", DT_QINT8) + .Attr("Tbias", DT_QINT32) + .Attr("Tout", DT_QINT32) + .Attr("fused_ops", {"BiasAdd"}) + .Input(FakeInput()) + .Input(FakeInput()) + .Finalize(node_def())); + } TF_ASSERT_OK(InitOp()); // A matrix is: // | 1 | 2 | 3 | @@ -144,20 +183,40 @@ TEST_F(QuantizedMatMulTest, Small_withNegBias) { // Two small matrices A of type uint8 (converted from signed integer) // and B of type int8 are multiplied and the result is added with float bias -TEST_F(QuantizedMatMulTest, Small_WithNegInp) { - TF_ASSERT_OK( - NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias") - .Input(FakeInput(DT_QUINT8)) - .Input(FakeInput(DT_QINT8)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Attr("Toutput", DataTypeToEnum::v()) - .Attr("input_quant_mode", "MIN_FIRST") - .Attr("_kernel", "QuantizedMklOp") - .Finalize(node_def())); +TEST_P(QuantizedMatMulTest, Small_WithNegInp) { + const bool is_old_api = GetParam(); + if (is_old_api) { + TF_ASSERT_OK( + NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias") + .Input(FakeInput(DT_QUINT8)) + .Input(FakeInput(DT_QINT8)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Attr("Toutput", DataTypeToEnum::v()) + .Attr("input_quant_mode", "MIN_FIRST") + .Attr("_kernel", "QuantizedMklOp") + .Finalize(node_def())); + } else { + TF_ASSERT_OK( + NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul") + .Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT, DT_FLOAT, + DT_FLOAT, DT_FLOAT, DT_FLOAT}) + .Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT}) + .Attr("Tdevice_inputs", std::vector()) + .Attr("Tdevice_outputs", std::vector()) + .Attr("T1", DT_QUINT8) + .Attr("T2", DT_QINT8) + .Attr("Tbias", DT_FLOAT) + .Attr("Tout", DT_QINT32) + .Attr("fused_ops", {"BiasAdd"}) + .Attr("input_quant_mode", "MIN_FIRST") + .Input(FakeInput()) + .Input(FakeInput()) + .Finalize(node_def())); + } TF_ASSERT_OK(InitOp()); // The A matrix is: // | -1 | -5 | -9 | @@ -213,21 +272,41 @@ TEST_F(QuantizedMatMulTest, Small_WithNegInp) { // Two small matrices A of type uint8 and B of type int8 are multiplied // and the result is added with int32 bias and Requantization fusion -TEST_F(QuantizedMatMulTest, Small_withBiasAndReq) { - TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", - "_MklQuantizedMatMulWithBiasAndRequantize") - .Input(FakeInput(DT_QUINT8)) - .Input(FakeInput(DT_QINT8)) - .Input(FakeInput(DT_QINT32)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Attr("Toutput", 
DataTypeToEnum::v()) - .Attr("_kernel", "QuantizedMklOp") - .Finalize(node_def())); +TEST_P(QuantizedMatMulTest, Small_withBiasAndReq) { + const bool is_old_api = GetParam(); + if (is_old_api) { + TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", + "_MklQuantizedMatMulWithBiasAndRequantize") + .Input(FakeInput(DT_QUINT8)) + .Input(FakeInput(DT_QINT8)) + .Input(FakeInput(DT_QINT32)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Attr("Toutput", DataTypeToEnum::v()) + .Attr("_kernel", "QuantizedMklOp") + .Finalize(node_def())); + } else { + TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul") + .Attr("Thost_inputs", + {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT, DT_FLOAT, + DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT}) + .Attr("Thost_outputs", {DT_QUINT8, DT_FLOAT, DT_FLOAT}) + .Attr("Tdevice_inputs", std::vector()) + .Attr("Tdevice_outputs", std::vector()) + .Attr("T1", DT_QUINT8) + .Attr("T2", DT_QINT8) + .Attr("Tbias", DT_QINT32) + .Attr("Tout", DT_QUINT8) + .Attr("fused_ops", {"BiasAdd", "Requantize"}) + .Input(FakeInput()) + .Input(FakeInput()) + .Finalize(node_def())); + } + TF_ASSERT_OK(InitOp()); // A matrix is: // | 1 | 2 | 3 | @@ -273,11 +352,25 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndReq) { // 178 * 1.00392 ~= 178.698 ~= 179 Tensor expected(allocator(), DT_QUINT8, TensorShape({2, 4})); + if (is_old_api) { #ifdef ENABLE_ONEDNN_V3 - test::FillValues(&expected, {84, 60, 116, 52, 183, 168, 233, 178}); + test::FillValues(&expected, {84, 60, 116, 52, 183, 168, 233, 178}); #else - test::FillValues(&expected, {84, 60, 116, 52, 184, 169, 234, 179}); + test::FillValues(&expected, {84, 60, 116, 52, 184, 169, 234, 179}); #endif // ENABLE_ONEDNN_V3 + } else { + // New api uses more numerical precision preserving equation. Old api scales + // up to 32-bit and then scales down from 32-bit to 8-bit. New api instead + // does a dequantization followed by a scaling to 8-bit. + // In this test, + // input deq. scale = ((255.0 * 127.0) / (255.0 * 127.0)) = 1.0 + // output req. scale = 255.0 / 255.0 = 1.0 + // combined scale = 1.0 * 1.0 = 1.0 + // Note: new api scale value is 1.0, whereas the old api scale is 1.000392. + // Correct value is 1.0f. The closer it is to the correct value the better + // the formula is. 
+ test::FillValues(&expected, {84, 60, 116, 52, 183, 168, 233, 178}); + } const Tensor& output = *GetOutput(0); test::ExpectTensorEqual(expected, output); @@ -285,21 +378,40 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndReq) { // Two small matrices A of type uint8 and B of type int8 are multiplied // and the result is added with int32 bias and Requantization fusion -TEST_F(QuantizedMatMulTest, Small_withBiasAndDeq) { - TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", - "_MklQuantizedMatMulWithBiasAndDequantize") - .Input(FakeInput(DT_QUINT8)) - .Input(FakeInput(DT_QINT8)) - .Input(FakeInput(DT_QINT32)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Attr("Toutput", DataTypeToEnum::v()) - .Attr("_kernel", "QuantizedMklOp") - .Finalize(node_def())); +TEST_P(QuantizedMatMulTest, Small_withBiasAndDeq) { + const bool is_old_api = GetParam(); + if (is_old_api) { + TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", + "_MklQuantizedMatMulWithBiasAndDequantize") + .Input(FakeInput(DT_QUINT8)) + .Input(FakeInput(DT_QINT8)) + .Input(FakeInput(DT_QINT32)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Attr("Toutput", DataTypeToEnum::v()) + .Attr("_kernel", "QuantizedMklOp") + .Finalize(node_def())); + } else { + TF_ASSERT_OK( + NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul") + .Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT, + DT_FLOAT, DT_FLOAT, DT_FLOAT}) + .Attr("Thost_outputs", {DT_FLOAT}) + .Attr("Tdevice_inputs", std::vector()) + .Attr("Tdevice_outputs", std::vector()) + .Attr("T1", DT_QUINT8) + .Attr("T2", DT_QINT8) + .Attr("Tbias", DT_QINT32) + .Attr("Tout", DT_FLOAT) + .Attr("fused_ops", {"BiasAdd", "Dequantize"}) + .Input(FakeInput()) + .Input(FakeInput()) + .Finalize(node_def())); + } TF_ASSERT_OK(InitOp()); // A matrix is: // | 1 | 2 | 3 | @@ -316,9 +428,11 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndDeq) { AddInputFromArray(TensorShape({}), {255.0f}); AddInputFromArray(TensorShape({}), {-127.0f}); AddInputFromArray(TensorShape({}), {127.0f}); - AddInputFromArray(TensorShape({}), {0}); - AddInputFromArray(TensorShape({}), {255.0f}); + if (is_old_api) { + AddInputFromArray(TensorShape({}), {0}); + AddInputFromArray(TensorShape({}), {255.0f}); + } TF_ASSERT_OK(RunOpKernel()); // Here are the results we expect, from hand calculations: // (1 * 7) + (2 * 11) + (3 * 15) = 74 @@ -353,19 +467,38 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndDeq) { // Two small matrices A of type uint8 and B of type int8 are multiplied // and the result is added with float bias and then performed relu on the result -TEST_F(QuantizedMatMulTest, Small_withBiasAndRelu) { - TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", - "_MklQuantizedMatMulWithBiasAndRelu") - .Input(FakeInput(DT_QUINT8)) - .Input(FakeInput(DT_QINT8)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Attr("Toutput", DataTypeToEnum::v()) - .Attr("_kernel", "QuantizedMklOp") - .Finalize(node_def())); +TEST_P(QuantizedMatMulTest, Small_withBiasAndRelu) { + const bool is_old_api = GetParam(); + if (is_old_api) { + TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", + "_MklQuantizedMatMulWithBiasAndRelu") + .Input(FakeInput(DT_QUINT8)) + 
.Input(FakeInput(DT_QINT8)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Attr("Toutput", DataTypeToEnum::v()) + .Attr("_kernel", "QuantizedMklOp") + .Finalize(node_def())); + } else { + TF_ASSERT_OK( + NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul") + .Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT, DT_FLOAT, + DT_FLOAT, DT_FLOAT, DT_FLOAT}) + .Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT}) + .Attr("Tdevice_inputs", std::vector()) + .Attr("Tdevice_outputs", std::vector()) + .Attr("T1", DT_QUINT8) + .Attr("T2", DT_QINT8) + .Attr("Tbias", DT_FLOAT) + .Attr("Tout", DT_QINT32) + .Attr("fused_ops", {"BiasAdd", "Relu"}) + .Input(FakeInput()) + .Input(FakeInput()) + .Finalize(node_def())); + } TF_ASSERT_OK(InitOp()); // A matrix is: // | 1 | 2 | 3 | @@ -408,21 +541,41 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndRelu) { // Simple test for Matrix multiplication with Bias, Relu and // Requantization fusion -TEST_F(QuantizedMatMulTest, Small_withBiasAndReluAndReq) { - TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", - "_MklQuantizedMatMulWithBiasAndReluAndRequantize") - .Input(FakeInput(DT_QUINT8)) - .Input(FakeInput(DT_QINT8)) - .Input(FakeInput(DT_QINT32)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Attr("Toutput", DataTypeToEnum::v()) - .Attr("_kernel", "QuantizedMklOp") - .Finalize(node_def())); +TEST_P(QuantizedMatMulTest, Small_withBiasAndReluAndReq) { + const bool is_old_api = GetParam(); + if (is_old_api) { + TF_ASSERT_OK( + NodeDefBuilder("quantized_mat_mul_op", + "_MklQuantizedMatMulWithBiasAndReluAndRequantize") + .Input(FakeInput(DT_QUINT8)) + .Input(FakeInput(DT_QINT8)) + .Input(FakeInput(DT_QINT32)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Attr("Toutput", DataTypeToEnum::v()) + .Attr("_kernel", "QuantizedMklOp") + .Finalize(node_def())); + } else { + TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul") + .Attr("Thost_inputs", + {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT, DT_FLOAT, + DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT}) + .Attr("Thost_outputs", {DT_QUINT8, DT_FLOAT, DT_FLOAT}) + .Attr("Tdevice_inputs", std::vector()) + .Attr("Tdevice_outputs", std::vector()) + .Attr("T1", DT_QUINT8) + .Attr("T2", DT_QINT8) + .Attr("Tbias", DT_QINT32) + .Attr("Tout", DT_QUINT8) + .Attr("fused_ops", {"BiasAdd", "Relu", "Requantize"}) + .Input(FakeInput()) + .Input(FakeInput()) + .Finalize(node_def())); + } TF_ASSERT_OK(InitOp()); // A matrix is: // | 1 | 2 | 3 | @@ -470,11 +623,25 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndReluAndReq) { // 178 * 1.00392 ~= 178.698 ~= 179 Tensor expected(allocator(), DT_QUINT8, TensorShape({2, 4})); + if (is_old_api) { #ifdef ENABLE_ONEDNN_V3 - test::FillValues(&expected, {84, 60, 116, 52, 183, 168, 233, 178}); + test::FillValues(&expected, {84, 60, 116, 52, 183, 168, 233, 178}); #else - test::FillValues(&expected, {84, 60, 116, 52, 184, 169, 234, 179}); + test::FillValues(&expected, {84, 60, 116, 52, 184, 169, 234, 179}); #endif // ENABLE_ONEDNN_V3 + } else { + // New api uses more numerical precision preserving equation. Old api scales + // up to 32-bit and then scales down from 32-bit to 8-bit. 
New api instead + // does a dequantization followed by a scaling to 8-bit. + // In this test, + // input deq. scale = ((255.0 * 127.0) / (255.0 * 127.0)) = 1.0 + // output req. scale = 255.0 / 255.0 = 1.0 + // combined scale = 1.0 * 1.0 = 1.0 + // Note: new api scale value is 1.0, whereas the old api scale is 1.000392. + // Correct value is 1.0f. The closer it is to the correct value the better + // the formula is. + test::FillValues(&expected, {84, 60, 116, 52, 183, 168, 233, 178}); + } const Tensor& output = *GetOutput(0); test::ExpectTensorEqual(expected, output); @@ -484,19 +651,38 @@ TEST_F(QuantizedMatMulTest, Small_withBiasAndReluAndReq) { // and the result is added with int32 bias // For the first time B matrix will be reordered and cached which will be // used for subsequent runs -TEST_F(QuantizedMatMulTest, Small_withWeightCached) { - TF_ASSERT_OK( - NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias") - .Input(FakeInput(DT_QUINT8)) - .Input(FakeInput(DT_QINT8)) - .Input(FakeInput(DT_QINT32)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Attr("Toutput", DataTypeToEnum::v()) - .Attr("_kernel", "QuantizedMklOp") - .Finalize(node_def())); +TEST_P(QuantizedMatMulTest, Small_withWeightCached) { + const bool is_old_api = GetParam(); + if (is_old_api) { + TF_ASSERT_OK( + NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias") + .Input(FakeInput(DT_QUINT8)) + .Input(FakeInput(DT_QINT8)) + .Input(FakeInput(DT_QINT32)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Input(FakeInput(DT_FLOAT)) + .Attr("Toutput", DataTypeToEnum::v()) + .Attr("_kernel", "QuantizedMklOp") + .Finalize(node_def())); + } else { + TF_ASSERT_OK( + NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul") + .Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT, + DT_FLOAT, DT_FLOAT, DT_FLOAT}) + .Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT}) + .Attr("Tdevice_inputs", std::vector()) + .Attr("Tdevice_outputs", std::vector()) + .Attr("T1", DT_QUINT8) + .Attr("T2", DT_QINT8) + .Attr("Tbias", DT_QINT32) + .Attr("Tout", DT_QINT32) + .Attr("fused_ops", {"BiasAdd"}) + .Input(FakeInput()) + .Input(FakeInput()) + .Finalize(node_def())); + } TF_ASSERT_OK(InitOp()); // The tensor shape of (1,3) is selected to allow the oneDNN expected // weight format to be made as OI rather than IO for BS > 1 @@ -549,6 +735,9 @@ TEST_F(QuantizedMatMulTest, Small_withWeightCached) { test::ExpectTensorEqual(expected, output_new); } +INSTANTIATE_TEST_SUITE_P(All, QuantizedMatMulTest, + ::testing::Values(true, false)); + } // namespace tensorflow #endif // INTEL_MKL diff --git a/tensorflow/core/kernels/mkl/onednn_fused_matmul_ops_test.cc b/tensorflow/core/kernels/mkl/onednn_fused_matmul_ops_test.cc new file mode 100644 index 00000000000000..a98ddbcc359c82 --- /dev/null +++ b/tensorflow/core/kernels/mkl/onednn_fused_matmul_ops_test.cc @@ -0,0 +1,748 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#if defined(INTEL_MKL) + +#define EIGEN_USE_THREADS + +#include +#include + +#include "gtest/gtest.h" +#include "absl/algorithm/container.h" +#include "absl/strings/match.h" +#include "tensorflow/cc/ops/array_ops.h" +#include "tensorflow/cc/ops/const_op.h" +#include "tensorflow/cc/ops/math_ops.h" +#include "tensorflow/cc/ops/nn_ops.h" +#include "tensorflow/cc/ops/nn_ops_internal.h" +#include "tensorflow/cc/ops/standard_ops.h" +#include "tensorflow/core/framework/fake_input.h" +#include "tensorflow/core/framework/node_def_builder.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/graph/node_builder.h" +#include "tensorflow/core/kernels/mkl/mkl_kernel_util.h" +#include "tensorflow/core/kernels/ops_testutil.h" +#include "tensorflow/core/kernels/ops_util.h" +#include "tensorflow/core/kernels/quantization_utils.h" +#include "tensorflow/core/lib/core/status_test_util.h" +#include "tensorflow/core/platform/env.h" +#include "tensorflow/core/platform/test.h" +#include "tensorflow/core/protobuf/rewriter_config.pb.h" +#include "tensorflow/core/public/session.h" +#include "tensorflow/core/util/util.h" +#include "unsupported/Eigen/CXX11/Tensor" + +namespace tensorflow { + +// The test suite contains different categories of tests. +// (1) Realnumber (float/bfloat16): The output of _FusedMatMul should be +// close enough to the final output of the sequence of unfused operations. +// Only Gelu fusion is included here. All other fusion tests can be found in +// tensorflow/core/kernels/mkl/mkl_fused_ops_test.cc +// +// (2) Quantized: Possible fusions are done in _QuantizedMatMul op. The +// output of +// quantize --> quantized_op --> dequantize, or +// quantize --> quantized_op --> requantize --> dequantize +// should be close (with a higher tolerance) to the final output of the +// sequence of unfused real number type operations. For the quantized +// scenario, it is assumed that the first matrix of MatMul op represents the +// feature, while the second matrix represents the weight parameters. The feature +// matrix can be quantized with MIN_FIRST (to QUINT8) or SCALED (to QINT8) +// mode and always quantized per-tensor. The weight can be quantized with +// per-tensor or per-channel, only with SCALED mode to QINT8. + +// T: float or bfloat16 used as tensor type of the MatMul and fusion operation. +template +class FusedMatMulOpsTest : public OpsTestBase { + private: + float leakyrelu_alpha_ = 0.2f; + + protected: + struct FusedOpsAndDims { + // List of fusions. + std::vector fused_ops; + // Tensor dimension associated with the fusions. It is assumed here that + // each fusion requires no more than one additional tensor. If some fusion + // does not require a tensor, e.g., Relu, the tensor dimensions will be {0} + // implying an empty tensor. + std::vector> fusion_dims; + }; + + struct FusedOpsAndTensors { + // List of fusions. + std::vector fused_ops; + // Tensors associated with the fusions. It is assumed here that each fusion + // requires no more than one additional tensor. If some fusion does not + // require a tensor, e.g., Relu, the tensor will be an empty tensor.
+ std::vector fusion_tensors; + }; + + using GraphRunner = + std::function; + + using QuantizedGraphRunner = std::function; + + bool HasQuantizationSupport() { + return TestCPUFeature(tensorflow::port::CPUFeature::AVX_VNNI_INT8) || + TestCPUFeature(tensorflow::port::CPUFeature::AVX512_VNNI) || + TestCPUFeature(port::CPUFeature::AMX_INT8); + } + + // Runs a TensorFlow graph defined by the root scope, and fetches the result + // of the 'fetch' nodes into the outputs. The optional `add_nodes` parameter + // allows defining nodes directly using NodeDefBuilder. + void RunAndFetch(const tensorflow::Scope& root, + const std::vector& fetch, + std::vector* outputs, + const std::vector add_nodes = {}) { + tensorflow::GraphDef graph; + TF_ASSERT_OK(root.ToGraphDef(&graph)); + + for (const NodeDef* add_node : add_nodes) { + *graph.add_node() = *add_node; + } + + // We really want to make sure that the graph executes exactly as we passed it + // to the session, so we disable various optimizations. + tensorflow::SessionOptions session_options; + + // Disable common runtime constant folding. + session_options.config.mutable_graph_options() + ->mutable_optimizer_options() + ->set_opt_level(OptimizerOptions::L0); + + // Disable Grappler optimizations for tests. + tensorflow::RewriterConfig* cfg = + session_options.config.mutable_graph_options() + ->mutable_rewrite_options(); + cfg->set_constant_folding(tensorflow::RewriterConfig::OFF); + cfg->set_layout_optimizer(tensorflow::RewriterConfig::OFF); + cfg->set_remapping(tensorflow::RewriterConfig::OFF); + + std::unique_ptr session( + tensorflow::NewSession(session_options)); + + const string device = "/device:CPU:0"; + for (NodeDef& mutable_node : *graph.mutable_node()) { + mutable_node.set_device(device); + } + + TF_ASSERT_OK(session->Create(graph)); + TF_ASSERT_OK(session->Run({}, fetch, {}, outputs)); + } + + Output ActivationOp(Scope& root, string op, Output x, string name) { + // TODO(intel-tf): Add GeluExact (Erf op based) when the Erf op is enabled + // for bfloat16.
GeluExact with float32 precision test can be found in + // tensorflow/python/grappler/remapper_test.py + if (op == "Relu") { + return ops::Relu(root.WithOpName(name), x); + } else if (op == "Relu6") { + return ops::Relu6(root.WithOpName(name), x); + } else if (op == "LeakyRelu") { + return ops::internal::LeakyRelu( + root.WithOpName(name), x, + ops::internal::LeakyRelu::Attrs().Alpha(this->leakyrelu_alpha_)); + } else if (op == "Elu") { + return ops::Elu(root.WithOpName(name), x); + } else if (op == "Tanh") { + return ops::Tanh(root.WithOpName(name), x); + } else if (op == "Sigmoid") { + return ops::Sigmoid(root.WithOpName(name), x); + } else if (op == "GeluApproximate") { + Output three = ops::Const(root.WithOpName("gelu_three"), 3.0f); + Output empirical = + ops::Const(root.WithOpName("gelu_empirical"), 0.044715f); + Output square_root_two_over_pi = ops::Const( + root.WithOpName("gelu_square_root_two_over_pi"), 0.7978845608028654f); + Output one = ops::Const(root.WithOpName("gelu_one"), 1.0f); + Output half = ops::Const(root.WithOpName("gelu_half"), 0.5f); + Output pow = ops::Pow(root.WithOpName("gelu_pow"), x, three); + Output mul1 = ops::Multiply(root.WithOpName("gelu_mul1"), empirical, pow); + Output add1 = ops::AddV2(root.WithOpName("gelu_add1"), x, mul1); + Output mul2 = ops::Multiply(root.WithOpName("gelu_mul2"), + square_root_two_over_pi, add1); + Output tanh = ops::Tanh(root.WithOpName("gelu_tanh"), mul2); + Output add3 = ops::AddV2(root.WithOpName("gelu_add3"), one, tanh); + Output mul3 = ops::Multiply(root.WithOpName("gelu_mul3"), half, x); + return ops::Multiply(root.WithOpName(name), mul3, add3); + } else { + EXPECT_TRUE(false) << absl::StrCat("The activation: ", op, + " is not supported in this test."); + } + } + + void RunMatMulAndFusedOps(const Tensor& x, const Tensor& y, + const FusedOpsAndTensors& fused_ops_and_tensors, + Tensor* result, bool transpose_x, + bool transpose_y) { + Scope root = tensorflow::Scope::NewRootScope(); + + Output x_input = + ops::Const(root.WithOpName("x_input"), Input::Initializer(x)); + Output y_input = + ops::Const(root.WithOpName("y_input"), Input::Initializer(y)); + Output last_output = ops::MatMul( + root.WithOpName("matmul"), x_input, y_input, + ops::MatMul::Attrs().TransposeA(transpose_x).TransposeB(transpose_y)); + auto& fused_ops = fused_ops_and_tensors.fused_ops; + auto& fusion_tensors = fused_ops_and_tensors.fusion_tensors; + for (int i = 0; i < fused_ops.size(); ++i) { + const string& op = fused_ops[i]; + if (op == "BiasAdd") { + Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)), + Input::Initializer(fusion_tensors[i])); + last_output = ops::BiasAdd( + root.WithOpName(absl::StrCat("bias_add_at_", i)), last_output, arg); + } else if (op == "Relu" || op == "Relu6" || op == "LeakyRelu" || + op == "Elu" || op == "Tanh" || op == "Sigmoid" || + op == "GeluApproximate") { + last_output = + ActivationOp(root, op, last_output, absl::StrCat(op, "_at_", i)); + } else if (op == "Add") { + ASSERT_EQ(x.dtype(), fusion_tensors[i].dtype()); + Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)), + Input::Initializer(fusion_tensors[i])); + last_output = ops::AddV2(root.WithOpName(absl::StrCat("add_at_", i)), + last_output, arg); + } else { + EXPECT_TRUE(false) << absl::StrCat("The fusion: [", + absl::StrJoin(fused_ops, ","), + "] is not supported in this test."); + } + } + std::vector outputs; + RunAndFetch(root, {last_output.name()}, &outputs); + *result = outputs[0]; + } + + void RunFusedMatMul(const Tensor& x, const Tensor& 
y, + const FusedOpsAndTensors& fused_ops_and_tensors, + Tensor* result, bool transpose_x, bool transpose_y) { + Scope root = tensorflow::Scope::NewRootScope(); + + DataType dtype = DataTypeToEnum::v(); + + Output x_input = + ops::Const(root.WithOpName("x_input"), Input::Initializer(x)); + Output y_input = + ops::Const(root.WithOpName("y_input"), Input::Initializer(y)); + auto& fused_ops = fused_ops_and_tensors.fused_ops; + auto& fusion_tensors = fused_ops_and_tensors.fusion_tensors; + int num_fusion_inputs = 0; + bool has_leaky_relu = false; + std::vector fusion_inputs; + for (int i = 0; i < fused_ops.size(); ++i) { + const string& op = fused_ops[i]; + if (op == "BiasAdd") { + Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)), + Input::Initializer(fusion_tensors[i])); + fusion_inputs.push_back({arg.name(), 0, dtype}); + num_fusion_inputs++; + } else if (op == "Add") { + ASSERT_EQ(x.dtype(), fusion_tensors[i].dtype()); + Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)), + Input::Initializer(fusion_tensors[i])); + fusion_inputs.push_back({arg.name(), 0, dtype}); + num_fusion_inputs++; + } else if (op == "LeakyRelu") { + has_leaky_relu = true; + } else { + bool is_supported = op == "Relu" || op == "Relu6" || + op == "LeakyRelu" || op == "Elu" || op == "Tanh" || + op == "Sigmoid" || op == "GeluApproximate"; + EXPECT_TRUE(is_supported) + << absl::StrCat("The fusion: [", absl::StrJoin(fused_ops, ","), + "] is not supported in this test."); + } + } + NodeDef fused_matmul; + std::vector add_nodes; + TF_EXPECT_OK(NodeDefBuilder("fused_batch_matmul", "_MklNativeFusedMatMul") + .Input({x_input.name(), 0, dtype}) + .Input({y_input.name(), 0, dtype}) + .Input(fusion_inputs) + .Attr("transpose_a", transpose_x) + .Attr("transpose_b", transpose_y) + .Attr("num_args", num_fusion_inputs) + .Attr("fused_ops", fused_ops) + .Attr("leakyrelu_alpha", + has_leaky_relu ? this->leakyrelu_alpha_ : 0.2f) + .Attr("_kernel", "MklNameChangeOp") + .Finalize(&fused_matmul)); + add_nodes = {&fused_matmul}; + std::vector outputs; + RunAndFetch(root, {fused_matmul.name()}, &outputs, add_nodes); + *result = outputs[0]; + } + + // Compute quantized tensor perchannel (aka axis) in SCALED mode for 2D + // tensor. + template + void GetPerchannelQuantizationTensors(const Tensor& input, Tensor* output, + Tensor* min_tensor, + Tensor* max_tensor) { + ASSERT_EQ(input.dims(), 2); + ASSERT_EQ(output->dtype(), DT_QINT8); + constexpr int axis = transpose ? 
0 : 1; + int num_channels = input.dim_size(axis); + ASSERT_EQ(min_tensor->NumElements(), num_channels); + ASSERT_EQ(max_tensor->NumElements(), num_channels); + + auto eigen_input_tensor = input.matrix().template cast(); + auto eigen_output_tensor = output->matrix(); + std::vector scales(num_channels); + float* min_tensor_buf = min_tensor->flat().data(); + float* max_tensor_buf = max_tensor->flat().data(); + for (int i = 0; i < num_channels; ++i) { + auto input_slice = eigen_input_tensor.template chip(i); + auto output_slice = eigen_output_tensor.template chip(i); + Eigen::Tensor min = input_slice.minimum(); + Eigen::Tensor max = input_slice.maximum(); + float min_i = min(); + float max_i = max(); + float range = std::max(std::abs(min_i), std::abs(max_i)); + min_tensor_buf[i] = -range; + max_tensor_buf[i] = range; + const float scale = 127.0f / range; + output_slice = (input_slice * scale).round().template cast(); + } + } + + void RunQuantizedMatMul(const Tensor& x, const Tensor& y, + const FusedOpsAndTensors& fused_ops_and_tensors, + Tensor* result, bool transpose_x, bool transpose_y, + string input_quant_mode, string output_quant_mode, + bool is_bias_quantized, bool is_perchannel, + bool requantize, float output_min, float output_max) { + // TODO(intel-tf): Extend test with quantized bias + ASSERT_EQ(is_bias_quantized, false); + + DataType real_dtype = DataTypeToEnum::v(); + DataType qinput_dtype = + (input_quant_mode == "MIN_FIRST") ? DT_QUINT8 : DT_QINT8; + // Quantize x and y + Tensor x_qtensor(qinput_dtype, x.shape()); + Tensor x_min_tensor(DT_FLOAT, TensorShape({})); + Tensor x_max_tensor(DT_FLOAT, TensorShape({})); + auto status = MklTestingUtil::GetQuantizationTensors( + x, &x_qtensor, qinput_dtype, input_quant_mode, &x_min_tensor, + &x_max_tensor); + ASSERT_TRUE(status.ok()); + Tensor y_qtensor(DT_QINT8, y.shape()); + const int num_channels = transpose_y ? y.dim_size(0) : y.dim_size(1); + TensorShape minmax_shape = + is_perchannel ? 
TensorShape({num_channels}) : TensorShape({}); + Tensor y_min_tensor(DT_FLOAT, minmax_shape); + Tensor y_max_tensor(DT_FLOAT, minmax_shape); + if (is_perchannel) { + if (transpose_y) { + GetPerchannelQuantizationTensors(y, &y_qtensor, &y_min_tensor, + &y_max_tensor); + } else { + GetPerchannelQuantizationTensors(y, &y_qtensor, &y_min_tensor, + &y_max_tensor); + } + } else { + auto status = MklTestingUtil::GetQuantizationTensors( + y, &y_qtensor, DT_QINT8, "SCALED", &y_min_tensor, &y_max_tensor); + ASSERT_TRUE(status.ok()); + } + + Scope root = tensorflow::Scope::NewRootScope(); + + Output x_input = + ops::Const(root.WithOpName("x_input"), Input::Initializer(x_qtensor)); + Output x_min = + ops::Const(root.WithOpName("x_min"), Input::Initializer(x_min_tensor)); + Output x_max = + ops::Const(root.WithOpName("x_max"), Input::Initializer(x_max_tensor)); + Output y_input = + ops::Const(root.WithOpName("y_input"), Input::Initializer(y_qtensor)); + Output y_min = + ops::Const(root.WithOpName("y_min"), Input::Initializer(y_min_tensor)); + Output y_max = + ops::Const(root.WithOpName("y_max"), Input::Initializer(y_max_tensor)); + auto& fused_ops = fused_ops_and_tensors.fused_ops; + auto& fusion_tensors = fused_ops_and_tensors.fusion_tensors; + int num_fusion_inputs = 0; + std::vector fusion_inputs; + bool has_leaky_relu = false; + for (int i = 0; i < fused_ops.size(); ++i) { + const string& op = fused_ops[i]; + if (op == "BiasAdd") { + Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)), + Input::Initializer(fusion_tensors[i])); + fusion_inputs.push_back({arg.name(), 0, real_dtype}); + num_fusion_inputs++; + } else if (op == "Add") { + ASSERT_EQ(real_dtype, fusion_tensors[i].dtype()); + Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)), + Input::Initializer(fusion_tensors[i])); + fusion_inputs.push_back({arg.name(), 0, real_dtype}); + num_fusion_inputs++; + } else if (op == "LeakyRelu") { + has_leaky_relu = true; + } + } + NodeDef fused_matmul; + std::vector add_nodes; + std::vector outputs; + std::vector inputs; + inputs.push_back({"x_input", 0, qinput_dtype}); + inputs.push_back({"y_input", 0, DT_QINT8}); + inputs.insert(std::end(inputs), std::begin(fusion_inputs), + std::end(fusion_inputs)); + inputs.push_back({"x_min", 0, DT_FLOAT}); + inputs.push_back({"x_max", 0, DT_FLOAT}); + inputs.push_back({"y_min", 0, DT_FLOAT}); + inputs.push_back({"y_max", 0, DT_FLOAT}); + std::vector extended_fused_ops(fused_ops); + DataType out_dtype; + if (requantize) { + if (output_quant_mode == "SCALED") { + out_dtype = DT_QINT8; + } else { + out_dtype = DT_QUINT8; + } + } else { + out_dtype = real_dtype; + } + std::vector output_dtypes; + if (requantize) { + Output out_min = ops::Const(root.WithOpName("output_min"), output_min); + Output out_max = ops::Const(root.WithOpName("output_max"), output_max); + inputs.push_back({"output_min", 0, DT_FLOAT}); + inputs.push_back({"output_max", 0, DT_FLOAT}); + extended_fused_ops.push_back("Requantize"); + output_dtypes = {out_dtype, DT_FLOAT, DT_FLOAT}; + } else { + extended_fused_ops.push_back("Dequantize"); + output_dtypes = {out_dtype}; + } + + TF_EXPECT_OK(NodeDefBuilder("quantized_fused_matmul", "_QuantizedMatMul") + .Attr("Tdevice_inputs", std::vector()) + .Input(FakeInput()) + .Input(inputs) + .Attr("Thost_outputs", output_dtypes) + .Attr("Tdevice_outputs", std::vector()) + .Attr("T1", qinput_dtype) + .Attr("T2", DT_QINT8) + .Attr("Tbias", real_dtype) + .Attr("Tout", out_dtype) + .Attr("U", real_dtype) + .Attr("transpose_a", transpose_x) + 
.Attr("transpose_b", transpose_y) + .Attr("fused_ops", extended_fused_ops) + .Attr("leakyrelu_alpha", + has_leaky_relu ? this->leakyrelu_alpha_ : 0.2f) + .Attr("input_quant_mode", input_quant_mode) + .Attr("output_quant_mode", output_quant_mode) + .Finalize(&fused_matmul)); + if (requantize) { + NodeDef dequantize; + TF_EXPECT_OK(NodeDefBuilder("dequantize", "Dequantize") + .Input({"quantized_fused_matmul", 0, out_dtype}) + .Input({"quantized_fused_matmul", 1, DT_FLOAT}) + .Input({"quantized_fused_matmul", 2, DT_FLOAT}) + .Attr("dtype", real_dtype) + .Attr("mode", output_quant_mode) + .Finalize(&dequantize)); + add_nodes = {&fused_matmul, &dequantize}; + RunAndFetch(root, {dequantize.name()}, &outputs, add_nodes); + } else { + add_nodes = {&fused_matmul}; + RunAndFetch(root, {fused_matmul.name()}, &outputs, add_nodes); + } + *result = outputs[0]; + } + + template + void VerifyTensorsNear(const std::vector& x_dims, + const std::vector& y_dims, + const FusedOpsAndDims& fused_ops_and_dims, + const GraphRunner& run_default, + const FusedGraphRunner& run_fused, bool transpose_x, + bool transpose_y, const double atol = 1e-5, + // The following arguments are used by quantized fusion + string input_quant_mode = "SCALED", + string output_quant_mode = "SCALED", + bool is_bias_quantized = false, + bool is_perchannel = false, bool requantize = false) { + DataType dtype = DataTypeToEnum::v(); + TensorShape x_shape = TensorShape(x_dims); + TensorShape y_shape = TensorShape(y_dims); + + Tensor x_tensor(dtype, x_shape); + x_tensor.flat().setRandom(); + x_tensor.flat() -= x_tensor.flat().constant(static_cast(0.5)); + + Tensor y_tensor(dtype, y_shape); + y_tensor.flat().setRandom(); + y_tensor.flat() -= y_tensor.flat().constant(static_cast(0.5)); + + FusedOpsAndTensors fused_ops_and_tensors; + fused_ops_and_tensors.fused_ops = fused_ops_and_dims.fused_ops; + const auto& fused_ops = fused_ops_and_tensors.fused_ops; // Alias to field + const auto& fusion_dims = fused_ops_and_dims.fusion_dims; // Alias to field + auto& fusion_tensors = fused_ops_and_tensors.fusion_tensors; + for (int i = 0; i < fused_ops.size(); ++i) { + TensorShape arg_shape = TensorShape(fusion_dims[i]); + Tensor arg_tensor(dtype, arg_shape); + arg_tensor.flat().setRandom(); + arg_tensor.flat() -= + arg_tensor.flat().constant(static_cast(0.5)); + fusion_tensors.push_back(arg_tensor); + } + Tensor default_result; + run_default(x_tensor, y_tensor, fused_ops_and_tensors, &default_result, + transpose_x, transpose_y); + + Tensor fused_result; + if constexpr (std::is_same::value) { + float output_min = 1.0; + float output_max = 1.0 + std::numeric_limits::epsilon(); + if (requantize) { + T min; + T max; + MklTestingUtil::ComputeMinMax(default_result, &min, &max); + output_min = static_cast(min); + output_max = static_cast(max); + } + // Run quantized fusion. + run_fused(x_tensor, y_tensor, fused_ops_and_tensors, &fused_result, + transpose_x, transpose_y, input_quant_mode, output_quant_mode, + is_bias_quantized, is_perchannel, requantize, output_min, + output_max); + } else { + // Run realnumber type fusion. 
+ run_fused(x_tensor, y_tensor, fused_ops_and_tensors, &fused_result, + transpose_x, transpose_y); + } + std::vector> tensor_pairs = { + {default_result, fused_result}}; + for (auto& pair : tensor_pairs) { + const Tensor& expected = pair.first; + const Tensor& evaluated = pair.second; + + ASSERT_EQ(expected.dtype(), evaluated.dtype()); + ASSERT_EQ(expected.shape(), evaluated.shape()); + + test::ExpectClose(expected, evaluated, atol); + } + } + + void GetFusionConfiguration(const std::vector& fused_ops, + const int row, const int col, + FusedOpsAndDims* fused_ops_and_dims) { + if (fused_ops == std::vector{"BiasAdd"}) { + *fused_ops_and_dims = {fused_ops, {std::vector{col}}}; + } else if (fused_ops == std::vector{"BiasAdd", "Relu"} || + fused_ops == std::vector{"BiasAdd", "Relu6"} || + fused_ops == std::vector{"BiasAdd", "LeakyRelu"} || + fused_ops == std::vector{"BiasAdd", "Elu"} || + fused_ops == std::vector{"BiasAdd", "Tanh"} || + fused_ops == std::vector{"BiasAdd", "Sigmoid"} || + fused_ops == std::vector{"BiasAdd", "GeluApproximate"}) { + *fused_ops_and_dims = { + fused_ops, {std::vector{col}, std::vector{0}}}; + } else if (fused_ops == std::vector{"BiasAdd", "Add"}) { + *fused_ops_and_dims = { + fused_ops, + {std::vector{col}, std::vector{row, col}}}; + } else { + EXPECT_TRUE(false) << absl::StrCat("The fusion: [", + absl::StrJoin(fused_ops, ","), + "] is not supported in this test."); + } + } + + void VerifyFusedMatMul(std::vector fused_ops) { + const GraphRunner run_default = + [&](const Tensor& x, const Tensor& y, + const FusedOpsAndTensors& fused_ops_and_tensors, Tensor* result, + bool transpose_x, bool transpose_y) { + this->RunMatMulAndFusedOps(x, y, fused_ops_and_tensors, result, + transpose_x, transpose_y); + }; + + const GraphRunner run_fused = + [&](const Tensor& x, const Tensor& y, + const FusedOpsAndTensors& fused_ops_and_tensors, Tensor* result, + bool transpose_x, bool transpose_y) { + this->RunFusedMatMul(x, y, fused_ops_and_tensors, result, transpose_x, + transpose_y); + }; + const double atol = std::is_same::value ? 1e-2 : 1e-5; + constexpr int M = 3; + constexpr int K = 4; + constexpr int N = 5; + bool transpose_x = false; // OpKernel does not support transpose_x. + std::vector x_dims; + std::vector y_dims; + FusedOpsAndDims fused_ops_and_dims; + GetFusionConfiguration(fused_ops, M, N, &fused_ops_and_dims); + for (bool transpose_y : {false, true}) { + x_dims = + transpose_x ? std::vector{K, M} : std::vector{M, K}; + y_dims = + transpose_y ? std::vector{N, K} : std::vector{K, N}; + VerifyTensorsNear(x_dims, y_dims, fused_ops_and_dims, + run_default, run_fused, transpose_x, + transpose_y, atol); + } + } + + // The following test runs with 32 configurations. 
+ // (1) input quantization mode : {"MIN_FIRST", "SCALED"} + // (2) output quantization mode : {"MIN_FIRST", "SCALED"} + // (3) weight quantization per_channel : {false, true} + // (4) output is requantized or dequantized: + // false: dequantized + // true: requantized + // (5) weight matrix is transposed : {false, true} + void VerifyQuantizedMatMul(std::vector fused_ops) { + if (!HasQuantizationSupport()) { + GTEST_SKIP() << "oneDNN based Quantized ops are not enabled on this CPU."; + } + const GraphRunner run_default = + [&](const Tensor& x, const Tensor& y, + const FusedOpsAndTensors& fused_ops_and_tensors, Tensor* result, + bool transpose_x, bool transpose_y) { + this->RunMatMulAndFusedOps(x, y, fused_ops_and_tensors, result, + transpose_x, transpose_y); + }; + + const QuantizedGraphRunner run_quantized = + [&](const Tensor& x, const Tensor& y, + const FusedOpsAndTensors& fused_ops_and_tensors, Tensor* result, + bool transpose_x, bool transpose_y, string input_quant_mode, + string output_quant_mode, bool is_bias_quantized, + bool is_perchannel, bool requantize, float output_min, + float output_max) { + this->RunQuantizedMatMul( + x, y, fused_ops_and_tensors, result, transpose_x, transpose_y, + input_quant_mode, output_quant_mode, is_bias_quantized, + is_perchannel, requantize, output_min, output_max); + }; + + const double atol = 1e-2; + constexpr int M = 3; + constexpr int K = 4; + constexpr int N = 5; + bool transpose_x = false; // OpKernel does not support transpose_x. + std::vector x_dims; + std::vector y_dims; + FusedOpsAndDims fused_ops_and_dims; + GetFusionConfiguration(fused_ops, M, N, &fused_ops_and_dims); + std::vector requantization_config; + if (fused_ops == std::vector{"BiasAdd", "Add"}) { + // MatMul + BiasAdd + Add + Requantize fusion is not supported yet. + requantization_config = {false}; + } else { + requantization_config = {false, true}; + } + for (bool transpose_y : {false, true}) { + x_dims = + transpose_x ? std::vector{K, M} : std::vector{M, K}; + y_dims = + transpose_y ? std::vector{N, K} : std::vector{K, N}; + for (bool per_channel : {false, true}) { + for (string input_quant_mode : {"MIN_FIRST", "SCALED"}) { + for (string output_quant_mode : {"MIN_FIRST", "SCALED"}) { + for (bool requantize : requantization_config) { + VerifyTensorsNear( + x_dims, y_dims, fused_ops_and_dims, run_default, + run_quantized, transpose_x, transpose_y, atol, + input_quant_mode, output_quant_mode, false, per_channel, + requantize); + } + } + } + } + } + } +}; + +TYPED_TEST_SUITE_P(FusedMatMulOpsTest); + +// Realnumber typed test. +TYPED_TEST_P(FusedMatMulOpsTest, BiasAddGeluApproximate) { + this->VerifyFusedMatMul({"BiasAdd", "GeluApproximate"}); +} + +// The following tests are for quantized fusions.
+TYPED_TEST_P(FusedMatMulOpsTest, Quantized_BiasAdd) { + this->VerifyQuantizedMatMul({"BiasAdd"}); +} + +TYPED_TEST_P(FusedMatMulOpsTest, Quantized_BiasAddRelu) { + this->VerifyQuantizedMatMul({"BiasAdd", "Relu"}); +} + +TYPED_TEST_P(FusedMatMulOpsTest, Quantized_BiasAddRelu6) { + this->VerifyQuantizedMatMul({"BiasAdd", "Relu6"}); +} + +TYPED_TEST_P(FusedMatMulOpsTest, Quantized_BiasAddLeakyRelu) { + this->VerifyQuantizedMatMul({"BiasAdd", "LeakyRelu"}); +} + +TYPED_TEST_P(FusedMatMulOpsTest, Quantized_BiasAddElu) { + this->VerifyQuantizedMatMul({"BiasAdd", "Elu"}); +} + +TYPED_TEST_P(FusedMatMulOpsTest, Quantized_BiasAddTanh) { + this->VerifyQuantizedMatMul({"BiasAdd", "Tanh"}); +} + +TYPED_TEST_P(FusedMatMulOpsTest, Quantized_BiasAddSigmoid) { + this->VerifyQuantizedMatMul({"BiasAdd", "Sigmoid"}); +} + +TYPED_TEST_P(FusedMatMulOpsTest, Quantized_BiasAddGeluApproximate) { + this->VerifyQuantizedMatMul({"BiasAdd", "GeluApproximate"}); +} + +TYPED_TEST_P(FusedMatMulOpsTest, Quantized_BiasAddAdd) { + this->VerifyQuantizedMatMul({"BiasAdd", "Add"}); +} + +REGISTER_TYPED_TEST_SUITE_P(FusedMatMulOpsTest, BiasAddGeluApproximate, + Quantized_BiasAdd, Quantized_BiasAddRelu, + Quantized_BiasAddRelu6, Quantized_BiasAddLeakyRelu, + Quantized_BiasAddElu, Quantized_BiasAddTanh, + Quantized_BiasAddSigmoid, + Quantized_BiasAddGeluApproximate, + Quantized_BiasAddAdd); + +// TODO(intel-tf): Add bfloat16 to Types when PR#56613 is merged. +using DataTypes = ::testing::Types; + +INSTANTIATE_TYPED_TEST_SUITE_P(Test, FusedMatMulOpsTest, DataTypes); + +} // namespace tensorflow + +#endif // INTEL_MKL diff --git a/tensorflow/core/kernels/sparse_reduce_op.cc b/tensorflow/core/kernels/sparse_reduce_op.cc index 348a73e0816280..8f875053c66579 100644 --- a/tensorflow/core/kernels/sparse_reduce_op.cc +++ b/tensorflow/core/kernels/sparse_reduce_op.cc @@ -17,6 +17,7 @@ limitations under the License. #define EIGEN_USE_THREADS +#include "absl/status/status.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" @@ -47,13 +48,14 @@ struct ReduceDetails { // Compute common reduce parameters that'll be used for SparseTensor // reductions. Usage: -// ReduceDetails reduction = SparseTensorReduceHelper(sp, axes, keep_dims); -// sp.Reorder(reduction.reorder_dims); -// for (const auto& g : sp.group(reduction.group_by_dims)) { +// StatusOr reduction = +// SparseTensorReduceHelper(sp, axes, keep_dims); +// sp.Reorder(reduction->reorder_dims); +// for (const auto& g : sp.group(reduction->group_by_dims)) { // ... // } -// // Set output shape to reduction.reduced_shape. -ReduceDetails SparseTensorReduceHelper(const SparseTensor &sp, +// // Set output shape to reduction->reduced_shape. 
+absl::StatusOr SparseTensorReduceHelper(const SparseTensor &sp, absl::Span axes_slice, bool keep_dims) { ReduceDetails reduction; @@ -101,7 +103,11 @@ ReduceDetails SparseTensorReduceHelper(const SparseTensor &sp, out_dim_sizes = sp.PickDims(reduction.group_by_dims); } - reduction.reduced_shape = TensorShape(out_dim_sizes); + absl::Status success = + TensorShape::BuildTensorShape(out_dim_sizes, &reduction.reduced_shape); + if (!success.ok()) { + return success; + } return reduction; } @@ -181,8 +187,10 @@ class SparseReduceOp : public OpKernel { OP_REQUIRES_OK(ctx, SparseTensor::Create( tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t), shape, &sp)); - ReduceDetails reduction = SparseTensorReduceHelper( + absl::StatusOr reduction_or = SparseTensorReduceHelper( sp, reduction_axes_t->flat(), keep_dims_); + OP_REQUIRES_OK(ctx, reduction_or.status()); + ReduceDetails reduction = *reduction_or; Tensor *out_values; OP_REQUIRES_OK( @@ -287,8 +295,10 @@ class SparseReduceSparseOp : public OpKernel { OP_REQUIRES_OK(ctx, SparseTensor::Create(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t), shape, &sp)); - ReduceDetails reduction = SparseTensorReduceHelper( + absl::StatusOr reduction_or = SparseTensorReduceHelper( sp, reduction_axes_t->flat(), keep_dims_); + OP_REQUIRES_OK(ctx, reduction_or.status()); + ReduceDetails reduction = *reduction_or; sp.Reorder(reduction.reorder_dims); // Count nnzs in the output SparseTensor. diff --git a/tensorflow/core/kernels/string_to_number_op.cc b/tensorflow/core/kernels/string_to_number_op.cc index d0ee2a96e4572a..7d1553874d9a7a 100644 --- a/tensorflow/core/kernels/string_to_number_op.cc +++ b/tensorflow/core/kernels/string_to_number_op.cc @@ -16,6 +16,8 @@ limitations under the License. // See docs in ../ops/parse_ops.cc. 
#include + +#include #include #include "tensorflow/core/framework/kernel_def_builder.h" @@ -68,6 +70,8 @@ REGISTER(float); REGISTER(double); REGISTER(int32); REGISTER(int64_t); +REGISTER(uint32_t); +REGISTER(uint64_t); #undef REGISTER } // namespace tensorflow diff --git a/tensorflow/core/lib/monitoring/BUILD b/tensorflow/core/lib/monitoring/BUILD index b832e86aedaf46..62e26fa28d30c9 100644 --- a/tensorflow/core/lib/monitoring/BUILD +++ b/tensorflow/core/lib/monitoring/BUILD @@ -9,14 +9,6 @@ package( default_visibility = [ # tensorflow/core:lib effectively exposes all targets under tensorflow/core/lib/** "//tensorflow/core:__pkg__", - # tensorflow/core/platform:monitoring depends on this package - "//tensorflow/core/platform:__subpackages__", - # tensorflow/compiler/xla/pjrt:metrics depends on this package - "@local_xla//xla/pjrt:__subpackages__", - "@local_xla//xla/service/gpu:__subpackages__", - # tensorflow/compiler/mlir/tfrt:tf_jitrt depends on this package - "//tensorflow/compiler/mlir/tfrt:__subpackages__", - "@local_tsl//tsl/lib/monitoring:__subpackages__", ], licenses = ["notice"], ) @@ -98,7 +90,6 @@ cc_library( visibility = [ "//learning/brain/google/monitoring:__subpackages__", "//tensorflow/core:__subpackages__", - "@local_tsl//tsl:__subpackages__", ], deps = [ ":collected_metrics", @@ -148,11 +139,7 @@ cc_library( cc_library( name = "metric_def", hdrs = ["metric_def.h"], - visibility = [ - "//learning/brain/google/monitoring:__subpackages__", - "//tensorflow/core:__subpackages__", - "@local_tsl//tsl/lib/monitoring:__subpackages__", - ], + visibility = ["//tensorflow/core:__subpackages__"], deps = [ ":types", "//tensorflow/core/framework:summary_proto_cc", diff --git a/tensorflow/core/lib/strings/BUILD b/tensorflow/core/lib/strings/BUILD index 2f04ae43b027a3..72eb0a6dac308c 100644 --- a/tensorflow/core/lib/strings/BUILD +++ b/tensorflow/core/lib/strings/BUILD @@ -43,6 +43,7 @@ cc_library( "//tensorflow/core/platform:macros", "//tensorflow/core/platform:stringpiece", "//tensorflow/core/platform:types", + "@local_tsl//tsl/lib/core:bits", ], ) diff --git a/tensorflow/core/lib/strings/ordered_code.cc b/tensorflow/core/lib/strings/ordered_code.cc index 8d3f0a66d7977d..65210dc28d1467 100644 --- a/tensorflow/core/lib/strings/ordered_code.cc +++ b/tensorflow/core/lib/strings/ordered_code.cc @@ -20,6 +20,7 @@ limitations under the License. #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/stringpiece.h" +#include "tsl/lib/core/bits.h" namespace tensorflow { namespace strings { @@ -403,49 +404,15 @@ static const uint64 kLengthToMask[1 + kMaxSigned64Length] = { // For positive numbers, the number of bits is 1 plus the most significant // bit position (the highest bit position in a positive int64 is 63). // For a negative number n, we count the bits in ~n. -// That is, length = kBitsToLength[Bits::Log2Floor64(n < 0 ? ~n : n) + 1]. +// That is, length = kBitsToLength[tsl::Log2Floor64(n < 0 ? ~n : n) + 1]. static const int8 kBitsToLength[1 + 63] = { 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10}; -#if defined(__GNUC__) -// Returns floor(lg(n)). Returns -1 if n == 0. -static int Log2Floor64(uint64 n) { - return n == 0 ? 
-1 : 63 ^ __builtin_clzll(n); -} -#else -// Portable slow version -static int Log2Floor32_Portable(uint32 n) { - if (n == 0) return -1; - int log = 0; - uint32 value = n; - for (int i = 4; i >= 0; --i) { - int shift = (1 << i); - uint32 x = value >> shift; - if (x != 0) { - value = x; - log += shift; - } - } - assert(value == 1); - return log; -} -// Returns floor(lg(n)). Returns -1 if n == 0. -static int Log2Floor64(uint64 n) { - const uint32 topbits = static_cast(n >> 32); - if (topbits == 0) { - // Top bits are zero, so scan in bottom bits - return Log2Floor32_Portable(static_cast(n)); - } else { - return 32 + Log2Floor32_Portable(topbits); - } -} -#endif - // Calculates the encoding length in bytes of the signed number n. static inline int SignedEncodingLength(int64_t n) { - return kBitsToLength[Log2Floor64(n < 0 ? ~n : n) + 1]; + return kBitsToLength[tsl::Log2Floor64(n < 0 ? ~n : n) + 1]; } static void StoreBigEndian64(char* dst, uint64 v) { @@ -494,7 +461,7 @@ bool OrderedCode::ReadSignedNumIncreasing(StringPiece* src, int64_t* result) { int len; uint64 x; if (first_byte != 0xff) { - len = 7 - Log2Floor64(first_byte ^ 0xff); + len = 7 - tsl::Log2Floor64(first_byte ^ 0xff); if (src->size() < static_cast(len)) return false; x = xor_mask; // sign extend using xor_mask for (int i = 0; i < len; ++i) diff --git a/tensorflow/core/nccl/collective_communicator.cc b/tensorflow/core/nccl/collective_communicator.cc index 1555ed9aac46a0..ac62f471087a21 100644 --- a/tensorflow/core/nccl/collective_communicator.cc +++ b/tensorflow/core/nccl/collective_communicator.cc @@ -21,7 +21,6 @@ limitations under the License. #include "absl/memory/memory.h" #include "tensorflow/core/nccl/nccl_manager.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/traceme.h" namespace tensorflow { diff --git a/tensorflow/core/ops/compat/op_compatibility_lib.h b/tensorflow/core/ops/compat/op_compatibility_lib.h index 2f26fd63bdd7a5..1693f2bd5cfe27 100644 --- a/tensorflow/core/ops/compat/op_compatibility_lib.h +++ b/tensorflow/core/ops/compat/op_compatibility_lib.h @@ -52,7 +52,11 @@ class OpCompatibilityLib { // Should match the contents of ops_file(). Run before calling // ValidateCompatible(). - string OpsString() const { return op_list_.DebugString(); } + string OpsString() const { + string result; + google::protobuf::TextFormat::PrintToString(op_list_, &result); + return result; + } // Returns the number of ops in OpsString(), includes all ops, not // just stable ops. 
diff --git a/tensorflow/core/ops/compat/ops_history_v2/Abort.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Abort.pbtxt index 5d88f788a8cda1..4752385d6ecf6d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Abort.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Abort.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Abort" attr { name: "error_msg" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Abs.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Abs.pbtxt index e901bc5794f682..5f44f9c6dca631 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Abs.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Abs.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Abs" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AccumulateNV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AccumulateNV2.pbtxt index 7a44ffd9fda37d..3e94aa154434f3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AccumulateNV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AccumulateNV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AccumulateNV2" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AccumulatorApplyGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AccumulatorApplyGradient.pbtxt index e920df37858235..dd1c9870a63327 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AccumulatorApplyGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AccumulatorApplyGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AccumulatorApplyGradient" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AccumulatorNumAccumulated.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AccumulatorNumAccumulated.pbtxt index 131af0c1b5b24a..f378509e1e02f9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AccumulatorNumAccumulated.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AccumulatorNumAccumulated.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AccumulatorNumAccumulated" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AccumulatorSetGlobalStep.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AccumulatorSetGlobalStep.pbtxt index 0f50ac1852b834..9b4170df332a6a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AccumulatorSetGlobalStep.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AccumulatorSetGlobalStep.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AccumulatorSetGlobalStep" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AccumulatorTakeGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AccumulatorTakeGradient.pbtxt index de3a79650b8f9d..1e28a68455fbf7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AccumulatorTakeGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AccumulatorTakeGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AccumulatorTakeGradient" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Acos.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Acos.pbtxt index 577976c6e4c579..504486147b2d30 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Acos.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Acos.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Acos" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Acosh.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Acosh.pbtxt index 136b9bfb209241..e53c8177f7f2f7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Acosh.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Acosh.pbtxt @@ -1,4 +1,4 @@ -op { +op 
{ name: "Acosh" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Add.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Add.pbtxt index d1f87827922fac..ce30e6d5544d04 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Add.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Add.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Add" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AddManySparseToTensorsMap.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AddManySparseToTensorsMap.pbtxt index 433a7f40b042e9..c1433ccbaf9e3b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AddManySparseToTensorsMap.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AddManySparseToTensorsMap.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AddManySparseToTensorsMap" input_arg { name: "sparse_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AddN.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AddN.pbtxt index 106bb1a9a9b7ae..8935c304edc5a8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AddN.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AddN.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AddN" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AddSparseToTensorsMap.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AddSparseToTensorsMap.pbtxt index 8c226e1965ce41..8a4c020d067b18 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AddSparseToTensorsMap.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AddSparseToTensorsMap.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AddSparseToTensorsMap" input_arg { name: "sparse_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AddV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AddV2.pbtxt index ad79c179db20ab..a2be10b4abe51e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AddV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AddV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AddV2" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AdjustContrast.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AdjustContrast.pbtxt index b2dc5e78e4b4e6..e51900d718bcc6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AdjustContrast.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AdjustContrast.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AdjustContrast" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AdjustContrastv2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AdjustContrastv2.pbtxt index 08fc84a5ab0cb0..6869f269dadf87 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AdjustContrastv2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AdjustContrastv2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AdjustContrastv2" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AdjustHue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AdjustHue.pbtxt index 6cce51abdc8b28..9a6c72d3d8f515 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AdjustHue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AdjustHue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AdjustHue" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AdjustSaturation.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AdjustSaturation.pbtxt index 4bc90aec904be0..918ea188d8523b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AdjustSaturation.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AdjustSaturation.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: 
"AdjustSaturation" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/All.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/All.pbtxt index 0afd8d468145d4..c0bc8f4beae4fc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/All.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/All.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "All" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AllCandidateSampler.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AllCandidateSampler.pbtxt index ff93f20c348920..e452850c261223 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AllCandidateSampler.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AllCandidateSampler.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AllCandidateSampler" input_arg { name: "true_classes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AllToAll.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AllToAll.pbtxt index 23796980cd62fc..005d16aec0d9e6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AllToAll.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AllToAll.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AllToAll" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Angle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Angle.pbtxt index 5a1ff8b86c421c..ce28927f2b8118 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Angle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Angle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Angle" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousHashTable.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousHashTable.pbtxt index 1a62f991251223..15826399d357e4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousHashTable.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousHashTable.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousHashTable" output_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousIterator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousIterator.pbtxt index 8094c8d79d6511..bf8f8fc2ed49d8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousIterator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousIterator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousIterator" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousIteratorV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousIteratorV2.pbtxt index 6b6dad7212bfe9..e7dca69e3e041c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousIteratorV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousIteratorV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousIteratorV2" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousIteratorV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousIteratorV3.pbtxt index 328434c6042ce2..15e975a4453c2e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousIteratorV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousIteratorV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousIteratorV3" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMemoryCache.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMemoryCache.pbtxt index b3ab7638e496a9..7f15df3e956212 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMemoryCache.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMemoryCache.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousMemoryCache" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMultiDeviceIterator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMultiDeviceIterator.pbtxt index a19b45630b5d2f..b8afaa363d6a66 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMultiDeviceIterator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMultiDeviceIterator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousMultiDeviceIterator" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMultiDeviceIteratorV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMultiDeviceIteratorV3.pbtxt index 20d70a6acc9855..c503bf5c9d3e83 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMultiDeviceIteratorV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMultiDeviceIteratorV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousMultiDeviceIteratorV3" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableDenseHashTable.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableDenseHashTable.pbtxt index f14a62b4faa484..6026fd263d36ff 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableDenseHashTable.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableDenseHashTable.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousMutableDenseHashTable" input_arg { name: "empty_key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableHashTable.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableHashTable.pbtxt index b375400621b8ee..6f34858f4d3bd3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableHashTable.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableHashTable.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousMutableHashTable" output_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableHashTableOfTensors.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableHashTableOfTensors.pbtxt index 692d1963adffc8..21b3744a98b973 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableHashTableOfTensors.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousMutableHashTableOfTensors.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousMutableHashTableOfTensors" output_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousRandomSeedGenerator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousRandomSeedGenerator.pbtxt index e805c04147a283..da2558b596ac6b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousRandomSeedGenerator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousRandomSeedGenerator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousRandomSeedGenerator" input_arg { name: "seed" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AnonymousSeedGenerator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AnonymousSeedGenerator.pbtxt index c2b9eee9b6a976..370b0460e22f3c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AnonymousSeedGenerator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AnonymousSeedGenerator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AnonymousSeedGenerator" input_arg { name: "seed" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Any.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/Any.pbtxt index 9b5d6350b83cfc..da020906a6d358 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Any.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Any.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Any" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdaMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdaMax.pbtxt index fe6e128a18034c..06f6134c329f5a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdaMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdaMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyAdaMax" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdadelta.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdadelta.pbtxt index 211c8d4a64efca..477fd7e824e958 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdadelta.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdadelta.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyAdadelta" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagrad.pbtxt index 5ede2bc76ea499..3da12f57396533 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyAdagrad" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagradDA.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagradDA.pbtxt index 3d00cb6f01bc9f..6795b12ea72d2f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagradDA.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagradDA.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyAdagradDA" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagradV2.pbtxt index f7673fe102988f..12f70b41e8c237 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdagradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyAdagradV2" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdam.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdam.pbtxt index fdcf81e0d03a74..ae512d525f0a76 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyAdam.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyAdam.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyAdam" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyAddSign.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyAddSign.pbtxt index 9e485fcfec65c2..f489857b9940d1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyAddSign.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyAddSign.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyAddSign" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyCenteredRMSProp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyCenteredRMSProp.pbtxt index 3e50dc38c7c12f..908b913f567d1c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyCenteredRMSProp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyCenteredRMSProp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyCenteredRMSProp" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyFtrl.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/ApplyFtrl.pbtxt index 6a67647bad7fae..2025575439504e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyFtrl.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyFtrl.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyFtrl" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyFtrlV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyFtrlV2.pbtxt index 201d61b2fa8373..d2d1af5d40ccb6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyFtrlV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyFtrlV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyFtrlV2" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyGradientDescent.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyGradientDescent.pbtxt index 25fb5723ebdaf9..8451d74ea1c4ba 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyGradientDescent.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyGradientDescent.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyGradientDescent" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyMomentum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyMomentum.pbtxt index 289c6ea6151f89..f54384d4fbd58f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyMomentum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyMomentum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyMomentum" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyPowerSign.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyPowerSign.pbtxt index fb3838c02a2e16..f2f8f79d5bad9b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyPowerSign.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyPowerSign.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyPowerSign" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyProximalAdagrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyProximalAdagrad.pbtxt index c25959517fcd7b..632d4d5ff91e2f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyProximalAdagrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyProximalAdagrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyProximalAdagrad" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyProximalGradientDescent.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyProximalGradientDescent.pbtxt index 3482b511cffd59..6a9c4e9ae6d27c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyProximalGradientDescent.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyProximalGradientDescent.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyProximalGradientDescent" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApplyRMSProp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApplyRMSProp.pbtxt index 24fe49118c943d..77a1e75f1f959f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApplyRMSProp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApplyRMSProp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApplyRMSProp" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApproxTopK.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApproxTopK.pbtxt index a7a32594df6aa1..b97ee1259579ee 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApproxTopK.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApproxTopK.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApproxTopK" input_arg { 
name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ApproximateEqual.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ApproximateEqual.pbtxt index be01e038fdef3a..0b62abc45a378d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ApproximateEqual.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ApproximateEqual.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ApproximateEqual" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ArgMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ArgMax.pbtxt index 90679987916abd..0cfef9a64eb544 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ArgMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ArgMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ArgMax" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ArgMin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ArgMin.pbtxt index d2113ec185ec1d..4b83dd746cc3c5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ArgMin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ArgMin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ArgMin" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AsString.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AsString.pbtxt index b2b275470159db..7b985f6126d4a3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AsString.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AsString.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AsString" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Asin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Asin.pbtxt index 690d2a556e4be8..652e0ea057b672 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Asin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Asin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Asin" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Asinh.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Asinh.pbtxt index 3d78d9de59399d..7f31ec1236ce91 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Asinh.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Asinh.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Asinh" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Assert.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Assert.pbtxt index 343b2bd0b8da08..a891ca8c601309 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Assert.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Assert.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Assert" input_arg { name: "condition" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AssertCardinalityDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AssertCardinalityDataset.pbtxt index 147978a3363667..edf77a307ece00 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AssertCardinalityDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AssertCardinalityDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AssertCardinalityDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AssertNextDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AssertNextDataset.pbtxt index 0bdc9b4f9b278e..f1ebd27d543b14 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AssertNextDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AssertNextDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AssertNextDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AssertPrevDataset.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/AssertPrevDataset.pbtxt index 23bced136aa919..62899074c77fd8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AssertPrevDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AssertPrevDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AssertPrevDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Assign.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Assign.pbtxt index 6760fe93d26221..9255e12f1a8f11 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Assign.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Assign.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Assign" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AssignAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AssignAdd.pbtxt index eea9bd9ad00ac5..a631b1b807a5ba 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AssignAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AssignAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AssignAdd" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AssignAddVariableOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AssignAddVariableOp.pbtxt index 91adfade5b6bc3..c3a8b74a0daaf8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AssignAddVariableOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AssignAddVariableOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AssignAddVariableOp" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AssignSub.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AssignSub.pbtxt index 3e13c2c8c52e17..0337c992553583 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AssignSub.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AssignSub.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AssignSub" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AssignSubVariableOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AssignSubVariableOp.pbtxt index a93f5576589e29..a5c9a567d077ae 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AssignSubVariableOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AssignSubVariableOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AssignSubVariableOp" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AssignVariableOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AssignVariableOp.pbtxt index fa84589b303260..e7c5290c53ecdb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AssignVariableOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AssignVariableOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AssignVariableOp" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AssignVariableXlaConcatND.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AssignVariableXlaConcatND.pbtxt index 8a17f4cadc3417..7c9d5df2c9c8c8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AssignVariableXlaConcatND.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AssignVariableXlaConcatND.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AssignVariableXlaConcatND" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Atan.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Atan.pbtxt index 78fd00eea4d39b..b063bf81719aed 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Atan.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Atan.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Atan" input_arg { name: "x" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/Atan2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Atan2.pbtxt index 9ae51baed83ca7..e58675db4c19d0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Atan2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Atan2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Atan2" input_arg { name: "y" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Atanh.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Atanh.pbtxt index 76aaad73963159..28d417a08544e9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Atanh.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Atanh.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Atanh" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AudioSpectrogram.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AudioSpectrogram.pbtxt index 96bf31789fcec6..dbc2a2280dee3c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AudioSpectrogram.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AudioSpectrogram.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AudioSpectrogram" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AudioSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AudioSummary.pbtxt index 0a33e5ca25463e..4b1830595e07e4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AudioSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AudioSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AudioSummary" input_arg { name: "tag" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AudioSummaryV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AudioSummaryV2.pbtxt index e92012709af98d..313c044aaeb506 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AudioSummaryV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AudioSummaryV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AudioSummaryV2" input_arg { name: "tag" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AutoShardDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AutoShardDataset.pbtxt index 6ab98040d710eb..465b757c8e967b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AutoShardDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AutoShardDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AutoShardDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AvgPool.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AvgPool.pbtxt index f7472845fbcbd4..8e7db139a9a3c7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AvgPool.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AvgPool.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AvgPool" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AvgPool3D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AvgPool3D.pbtxt index 619b1b4c4eaa70..f3f60cbc1f18d2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AvgPool3D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AvgPool3D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AvgPool3D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AvgPool3DGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/AvgPool3DGrad.pbtxt index 3c74eac2382c39..67fef9572878db 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AvgPool3DGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AvgPool3DGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AvgPool3DGrad" input_arg { name: "orig_input_shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/AvgPoolGrad.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/AvgPoolGrad.pbtxt index f3952fab895c1f..6c72effaffaa43 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/AvgPoolGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/AvgPoolGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "AvgPoolGrad" input_arg { name: "orig_input_shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BandedTriangularSolve.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BandedTriangularSolve.pbtxt index c69a9650010a38..5cf85a62392a2f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BandedTriangularSolve.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BandedTriangularSolve.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BandedTriangularSolve" input_arg { name: "matrix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Barrier.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Barrier.pbtxt index bdc8f8f53ba666..9391157b888851 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Barrier.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Barrier.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Barrier" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BarrierClose.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BarrierClose.pbtxt index 4d121c65796ef0..69230484813264 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BarrierClose.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BarrierClose.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BarrierClose" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BarrierIncompleteSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BarrierIncompleteSize.pbtxt index cb040bc1db3c0f..0d17c183684932 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BarrierIncompleteSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BarrierIncompleteSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BarrierIncompleteSize" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BarrierInsertMany.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BarrierInsertMany.pbtxt index db48fd4b4554d9..86b64f603ebb0a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BarrierInsertMany.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BarrierInsertMany.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BarrierInsertMany" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BarrierReadySize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BarrierReadySize.pbtxt index 8d214c6e6fedf0..e7b063005593aa 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BarrierReadySize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BarrierReadySize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BarrierReadySize" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BarrierTakeMany.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BarrierTakeMany.pbtxt index 71e75d20ca7a08..e324042930451c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BarrierTakeMany.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BarrierTakeMany.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BarrierTakeMany" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Batch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Batch.pbtxt index 24fc21b2f288db..d3ee8d8513d434 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Batch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Batch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Batch" input_arg { name: "in_tensors" diff 
--git a/tensorflow/core/ops/compat/ops_history_v2/BatchCholesky.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchCholesky.pbtxt index a8d4223751ce30..5d38acc7c2d563 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchCholesky.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchCholesky.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchCholesky" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchCholeskyGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchCholeskyGrad.pbtxt index 1beef0ebc8dda0..286ae3a81169d6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchCholeskyGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchCholeskyGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchCholeskyGrad" input_arg { name: "l" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchDataset.pbtxt index da9db473d53bbb..39467ae1bb6d33 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchDatasetV2.pbtxt index 4e95dbe7edd433..a3dc3afed0f53a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchFFT.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchFFT.pbtxt index 872afa3d2c76d1..4fe86a392f079b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchFFT.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchFFT.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchFFT" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchFFT2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchFFT2D.pbtxt index 1bd0127c2a50ee..b52a6bdca44512 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchFFT2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchFFT2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchFFT2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchFFT3D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchFFT3D.pbtxt index c1d39ed70f7671..7f19cf13c10573 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchFFT3D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchFFT3D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchFFT3D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchFunction.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchFunction.pbtxt index e35c2c8618eddd..cf5e5896d084ba 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchFunction.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchFunction.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchFunction" input_arg { name: "in_tensors" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT.pbtxt index 256417563cbc66..09d7b4ad7863a0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchIFFT" input_arg { name: "input" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT2D.pbtxt index 1c21807dededf1..23cc9cc51df5fc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchIFFT2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT3D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT3D.pbtxt index 3d6ddd79d52095..10a78fab914335 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT3D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchIFFT3D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchIFFT3D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatMul.pbtxt index 9d7ac3ca8e2a33..8bd778087f1467 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatMul" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatMulV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatMulV2.pbtxt index 4769d8220f53e1..ed724ddc49db94 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatMulV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatMulV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatMulV2" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatMulV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatMulV3.pbtxt index 1bcfdb937064ca..052a39e77c85ca 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatMulV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatMulV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatMulV3" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixBandPart.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixBandPart.pbtxt index 167fe7b69d484e..413681e612999d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixBandPart.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixBandPart.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatrixBandPart" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDeterminant.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDeterminant.pbtxt index 02e8c5dc93ec83..4bc6081aa4482a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDeterminant.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDeterminant.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatrixDeterminant" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDiag.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDiag.pbtxt index 4b06bc66e2a6dd..6104bef9340001 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDiag.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDiag.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatrixDiag" input_arg { name: "diagonal" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDiagPart.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDiagPart.pbtxt index 611f21ce1a1b67..9bd200f8cf5384 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDiagPart.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixDiagPart.pbtxt @@ -1,4 +1,4 @@ 
-op { +op { name: "BatchMatrixDiagPart" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixInverse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixInverse.pbtxt index 7e413ef3b05da5..03a694d973a1b5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixInverse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixInverse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatrixInverse" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSetDiag.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSetDiag.pbtxt index 377fe7cea09d92..f459184a0a3a43 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSetDiag.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSetDiag.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatrixSetDiag" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSolve.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSolve.pbtxt index 020873dea15c47..909502e91ab546 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSolve.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSolve.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatrixSolve" input_arg { name: "matrix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSolveLs.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSolveLs.pbtxt index f6bc34b54f7b77..8c9d24efc7a404 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSolveLs.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixSolveLs.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatrixSolveLs" input_arg { name: "matrix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixTriangularSolve.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixTriangularSolve.pbtxt index f259cceac49a17..406fa62171f511 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixTriangularSolve.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchMatrixTriangularSolve.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchMatrixTriangularSolve" input_arg { name: "matrix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchNormWithGlobalNormalization.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchNormWithGlobalNormalization.pbtxt index 846475ea1bfbbf..b9959a7d2dba82 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchNormWithGlobalNormalization.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchNormWithGlobalNormalization.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchNormWithGlobalNormalization" input_arg { name: "t" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchNormWithGlobalNormalizationGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchNormWithGlobalNormalizationGrad.pbtxt index c8b1b878780d0c..170d512aec6406 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchNormWithGlobalNormalizationGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchNormWithGlobalNormalizationGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchNormWithGlobalNormalizationGrad" input_arg { name: "t" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchSelfAdjointEig.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchSelfAdjointEig.pbtxt index e124892ed2d677..42ba04199f7c81 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchSelfAdjointEig.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchSelfAdjointEig.pbtxt @@ -1,4 +1,4 @@ -op 
{ +op { name: "BatchSelfAdjointEig" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchSelfAdjointEigV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchSelfAdjointEigV2.pbtxt index bc9128925886cb..df3996ea2378c1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchSelfAdjointEigV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchSelfAdjointEigV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchSelfAdjointEigV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchSvd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchSvd.pbtxt index 7080ed06437879..0595ffcd2a6eb6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchSvd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchSvd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchSvd" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchToSpace.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchToSpace.pbtxt index a96fdc58dfcb32..ac089e5ca76cf0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchToSpace.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchToSpace.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchToSpace" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BatchToSpaceND.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BatchToSpaceND.pbtxt index 18560c832264a4..464beb31614de7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BatchToSpaceND.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BatchToSpaceND.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BatchToSpaceND" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselI0.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselI0.pbtxt index b45aa841a7be67..78d524c916c861 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselI0.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselI0.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselI0" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselI0e.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselI0e.pbtxt index 8696576a64aa23..299cf82535aa06 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselI0e.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselI0e.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselI0e" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselI1.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselI1.pbtxt index 600fdb5771c3c1..e756c4655ddfdf 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselI1.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselI1.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselI1" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselI1e.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselI1e.pbtxt index d7284658a8bbb8..a9c8d0eb0e5a1b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselI1e.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselI1e.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselI1e" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselJ0.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselJ0.pbtxt index 73ee83477debbf..35e14e5fdf173e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselJ0.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselJ0.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselJ0" input_arg { name: "x" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/BesselJ1.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselJ1.pbtxt index de8e56a83c2739..ef8814ea8f723a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselJ1.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselJ1.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselJ1" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselK0.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselK0.pbtxt index 6bd13898bf74fa..ebb364d0371e52 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselK0.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselK0.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselK0" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselK0e.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselK0e.pbtxt index 4b1125eb3eac38..e3e680c9549023 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselK0e.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselK0e.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselK0e" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselK1.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselK1.pbtxt index 04aef9f8fe0e5b..f7ca7c2f6e27b1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselK1.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselK1.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselK1" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselK1e.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselK1e.pbtxt index 461db7a9222ea1..96fe68d7b7f313 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselK1e.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselK1e.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselK1e" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselY0.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselY0.pbtxt index 6d97aba4578264..cd62af3477370a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselY0.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselY0.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselY0" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BesselY1.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BesselY1.pbtxt index 6aa5123957547c..06f4c08eaf6932 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BesselY1.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BesselY1.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BesselY1" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Betainc.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Betainc.pbtxt index 330e5456a7afa6..b1523bff9e9807 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Betainc.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Betainc.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Betainc" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BiasAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BiasAdd.pbtxt index 15e7dad6982866..2eba00671d375c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BiasAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BiasAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BiasAdd" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BiasAddGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BiasAddGrad.pbtxt index ea11e9ee5dda20..f85a2b9f6a5c01 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BiasAddGrad.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/BiasAddGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BiasAddGrad" input_arg { name: "out_backprop" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BiasAddV1.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BiasAddV1.pbtxt index b1f6b0cc1fc478..b35e45165b8d63 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BiasAddV1.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BiasAddV1.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BiasAddV1" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Bincount.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Bincount.pbtxt index 1e8d56cd7b2dc9..12135bbd54a063 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Bincount.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Bincount.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Bincount" input_arg { name: "arr" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Bitcast.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Bitcast.pbtxt index e3c5814e29872b..993a0c6da9ec65 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Bitcast.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Bitcast.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Bitcast" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BitwiseAnd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BitwiseAnd.pbtxt index 1d8c1eb88c94e3..4b90e0e3de225a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BitwiseAnd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BitwiseAnd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BitwiseAnd" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BitwiseOr.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BitwiseOr.pbtxt index 681b469fc95130..393a506f339896 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BitwiseOr.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BitwiseOr.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BitwiseOr" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BitwiseXor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BitwiseXor.pbtxt index b8d801443cff93..c72b23fc432331 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BitwiseXor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BitwiseXor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BitwiseXor" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BlockLSTM.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BlockLSTM.pbtxt index 2c4ad7866c77ea..63180f534f17b0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BlockLSTM.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BlockLSTM.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BlockLSTM" input_arg { name: "seq_len_max" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMGrad.pbtxt index a03cda1c387f96..e7b6458bc8adb9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BlockLSTMGrad" input_arg { name: "seq_len_max" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMGradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMGradV2.pbtxt index 11b3e4c48f42ea..ed0bd6b245645a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMGradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMGradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BlockLSTMGradV2" input_arg { name: 
"seq_len_max" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMV2.pbtxt index 35df5226c632c4..5fce517277de83 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BlockLSTMV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BlockLSTMV2" input_arg { name: "seq_len_max" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesAggregateStats.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesAggregateStats.pbtxt index 45fe3a867da0b5..72994094399599 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesAggregateStats.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesAggregateStats.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesAggregateStats" input_arg { name: "node_ids" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesBucketize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesBucketize.pbtxt index 61a170763f1dd8..5f277d3e0db3ae 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesBucketize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesBucketize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesBucketize" input_arg { name: "float_values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestFeatureSplit.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestFeatureSplit.pbtxt index f885b3317006fe..50f35695e006fb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestFeatureSplit.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestFeatureSplit.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesCalculateBestFeatureSplit" input_arg { name: "node_id_range" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestFeatureSplitV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestFeatureSplitV2.pbtxt index 38ddb60c44ff2c..e900ed9c6748ca 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestFeatureSplitV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestFeatureSplitV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesCalculateBestFeatureSplitV2" input_arg { name: "node_id_range" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestGainsPerFeature.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestGainsPerFeature.pbtxt index e5f863eb4ac8ef..f100db7b38671c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestGainsPerFeature.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCalculateBestGainsPerFeature.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesCalculateBestGainsPerFeature" input_arg { name: "node_id_range" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCenterBias.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCenterBias.pbtxt index 78eba4fcb4ba17..5c2fb9b5c54747 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCenterBias.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCenterBias.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesCenterBias" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCreateEnsemble.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCreateEnsemble.pbtxt index 
8816746475cf45..cea6d23f91ff7b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCreateEnsemble.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCreateEnsemble.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesCreateEnsemble" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCreateQuantileStreamResource.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCreateQuantileStreamResource.pbtxt index d14e8e40b3a966..3d0d64adcd58fc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCreateQuantileStreamResource.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesCreateQuantileStreamResource.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesCreateQuantileStreamResource" input_arg { name: "quantile_stream_resource_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesDeserializeEnsemble.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesDeserializeEnsemble.pbtxt index 18aeb311841f89..b6d55ea0544c06 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesDeserializeEnsemble.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesDeserializeEnsemble.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesDeserializeEnsemble" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesEnsembleResourceHandleOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesEnsembleResourceHandleOp.pbtxt index 6c79d9a75101cf..00573c1b95a7ec 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesEnsembleResourceHandleOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesEnsembleResourceHandleOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesEnsembleResourceHandleOp" output_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesExampleDebugOutputs.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesExampleDebugOutputs.pbtxt index 8e8f1aa7f332c7..066be042842aef 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesExampleDebugOutputs.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesExampleDebugOutputs.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesExampleDebugOutputs" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesFlushQuantileSummaries.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesFlushQuantileSummaries.pbtxt index d2b84d1e01c5ae..ae35e1023d4fae 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesFlushQuantileSummaries.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesFlushQuantileSummaries.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesFlushQuantileSummaries" input_arg { name: "quantile_stream_resource_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesGetEnsembleStates.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesGetEnsembleStates.pbtxt index a6bc2b2749409e..1959384a36b4a5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesGetEnsembleStates.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesGetEnsembleStates.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesGetEnsembleStates" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesMakeQuantileSummaries.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesMakeQuantileSummaries.pbtxt index 14eacf93d81cdd..bbefa8b8711b1a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesMakeQuantileSummaries.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesMakeQuantileSummaries.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesMakeQuantileSummaries" input_arg { name: "float_values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesMakeStatsSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesMakeStatsSummary.pbtxt index 0772fc259310aa..49a82d2ba0c772 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesMakeStatsSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesMakeStatsSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesMakeStatsSummary" input_arg { name: "node_ids" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesPredict.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesPredict.pbtxt index 7f66b1248fe293..7f176cdc901666 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesPredict.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesPredict.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesPredict" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceAddSummaries.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceAddSummaries.pbtxt index ba12d9cdccfd88..97e875f4ea0bd8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceAddSummaries.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceAddSummaries.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesQuantileStreamResourceAddSummaries" input_arg { name: "quantile_stream_resource_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceDeserialize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceDeserialize.pbtxt index a08188e2dfbcc2..c3f01fef2a49a9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceDeserialize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceDeserialize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesQuantileStreamResourceDeserialize" input_arg { name: "quantile_stream_resource_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceFlush.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceFlush.pbtxt index ca7affea02c882..fc2613a0d372e9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceFlush.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceFlush.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesQuantileStreamResourceFlush" input_arg { name: "quantile_stream_resource_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceGetBucketBoundaries.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceGetBucketBoundaries.pbtxt index b359fce14edefc..b2aa8dd4e72321 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceGetBucketBoundaries.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceGetBucketBoundaries.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: 
"BoostedTreesQuantileStreamResourceGetBucketBoundaries" input_arg { name: "quantile_stream_resource_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceHandleOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceHandleOp.pbtxt index c106a233c27023..ca40a0aa1ede35 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceHandleOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesQuantileStreamResourceHandleOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesQuantileStreamResourceHandleOp" output_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSerializeEnsemble.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSerializeEnsemble.pbtxt index 88a5330954209c..29d19f03117634 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSerializeEnsemble.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSerializeEnsemble.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesSerializeEnsemble" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSparseAggregateStats.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSparseAggregateStats.pbtxt index b482c304d3dac7..9260634bf1edb5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSparseAggregateStats.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSparseAggregateStats.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesSparseAggregateStats" input_arg { name: "node_ids" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSparseCalculateBestFeatureSplit.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSparseCalculateBestFeatureSplit.pbtxt index cd590bfbc3838b..86f7a5ffd2118c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSparseCalculateBestFeatureSplit.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesSparseCalculateBestFeatureSplit.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesSparseCalculateBestFeatureSplit" input_arg { name: "node_id_range" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesTrainingPredict.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesTrainingPredict.pbtxt index 8b8b1053ff2c64..615f52c656de08 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesTrainingPredict.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesTrainingPredict.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesTrainingPredict" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesUpdateEnsemble.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesUpdateEnsemble.pbtxt index 9d50a5d4797069..9cd779e314a38c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesUpdateEnsemble.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesUpdateEnsemble.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BoostedTreesUpdateEnsemble" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesUpdateEnsembleV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesUpdateEnsembleV2.pbtxt index f1dc44c6f7ed06..2a573217f3ec25 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesUpdateEnsembleV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BoostedTreesUpdateEnsembleV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { 
name: "BoostedTreesUpdateEnsembleV2" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BroadcastArgs.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BroadcastArgs.pbtxt index 328d3fdb0da8ed..e6dc3990634597 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BroadcastArgs.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BroadcastArgs.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BroadcastArgs" input_arg { name: "s0" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BroadcastGradientArgs.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BroadcastGradientArgs.pbtxt index a0e831191f46cf..2e1d739f9881ac 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BroadcastGradientArgs.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BroadcastGradientArgs.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BroadcastGradientArgs" input_arg { name: "s0" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BroadcastTo.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BroadcastTo.pbtxt index 350e630e9df0fe..4d29f9ebeb8d22 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BroadcastTo.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BroadcastTo.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BroadcastTo" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Bucketize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Bucketize.pbtxt index 76174ffde85193..abe818e666cafd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Bucketize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Bucketize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Bucketize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/BytesProducedStatsDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/BytesProducedStatsDataset.pbtxt index 2e71b250953aaf..7cbfbbd146c83f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/BytesProducedStatsDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/BytesProducedStatsDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "BytesProducedStatsDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixComponents.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixComponents.pbtxt index 050ebb37abfa96..614097be7efe9e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixComponents.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixComponents.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CSRSparseMatrixComponents" input_arg { name: "csr_sparse_matrix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixToDense.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixToDense.pbtxt index be8df4e2115271..ed0cab0fcca8f4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixToDense.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixToDense.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CSRSparseMatrixToDense" input_arg { name: "sparse_input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixToSparseTensor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixToSparseTensor.pbtxt index dba86fe65b7ed8..39870fe0ca893c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixToSparseTensor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CSRSparseMatrixToSparseTensor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CSRSparseMatrixToSparseTensor" input_arg { name: "sparse_matrix" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/CSVDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CSVDataset.pbtxt index 02655c49a13501..56e1a03ab62995 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CSVDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CSVDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CSVDataset" input_arg { name: "filenames" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CSVDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CSVDatasetV2.pbtxt index 0d3909d2bff660..c540e8407e8480 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CSVDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CSVDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CSVDatasetV2" input_arg { name: "filenames" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CTCBeamSearchDecoder.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CTCBeamSearchDecoder.pbtxt index 6bcd60420e5fdb..5bd1968c832d29 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CTCBeamSearchDecoder.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CTCBeamSearchDecoder.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CTCBeamSearchDecoder" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CTCGreedyDecoder.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CTCGreedyDecoder.pbtxt index ab7f9af6e8c943..65e36d169a3bd9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CTCGreedyDecoder.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CTCGreedyDecoder.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CTCGreedyDecoder" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CTCLoss.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CTCLoss.pbtxt index 54ced032292324..1d7e041dd9111e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CTCLoss.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CTCLoss.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CTCLoss" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CTCLossV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CTCLossV2.pbtxt index e9631882f6595a..5a68abaa4e5ad8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CTCLossV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CTCLossV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CTCLossV2" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CacheDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CacheDataset.pbtxt index 3acc3ce899e6c4..8b8ec246c5be80 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CacheDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CacheDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CacheDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CacheDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CacheDatasetV2.pbtxt index 78d13d28238562..43fe482cba6e0a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CacheDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CacheDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CacheDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Case.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Case.pbtxt index 5a2cb6a6dfafbb..39cfc3f723630d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Case.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Case.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Case" input_arg { name: "branch_index" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/Cast.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Cast.pbtxt index 695048c5c775e2..581a8e4a8cc8a7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Cast.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Cast.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Cast" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Ceil.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Ceil.pbtxt index ad3771e9a84fdb..cdec08500072df 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Ceil.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Ceil.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Ceil" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CheckNumerics.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CheckNumerics.pbtxt index 64bafc119602a5..9e63b170b231db 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CheckNumerics.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CheckNumerics.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CheckNumerics" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CheckNumericsV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CheckNumericsV2.pbtxt index 362728e3dc53b1..ba06f6a3524e48 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CheckNumericsV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CheckNumericsV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CheckNumericsV2" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Cholesky.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Cholesky.pbtxt index bd26758db797c8..e3cee5fcf89aa8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Cholesky.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Cholesky.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Cholesky" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CholeskyGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CholeskyGrad.pbtxt index e526acd1f84513..0f7c7efd88c46f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CholeskyGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CholeskyGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CholeskyGrad" input_arg { name: "l" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ChooseFastestBranchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ChooseFastestBranchDataset.pbtxt index ab120b02bcfafd..4850496a3954b3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ChooseFastestBranchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ChooseFastestBranchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ChooseFastestBranchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ChooseFastestDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ChooseFastestDataset.pbtxt index f27c03e6e881d4..476e834edde27e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ChooseFastestDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ChooseFastestDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ChooseFastestDataset" input_arg { name: "input_datasets" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ClipByValue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ClipByValue.pbtxt index 821ca0c2425f7d..8652a168e85187 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ClipByValue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ClipByValue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ClipByValue" input_arg { name: "t" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/CloseSummaryWriter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CloseSummaryWriter.pbtxt index 7a4dea11575c66..f67e1aaff2470c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CloseSummaryWriter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CloseSummaryWriter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CloseSummaryWriter" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollateTPUEmbeddingMemory.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollateTPUEmbeddingMemory.pbtxt index 61f6f6fcccabaf..16c232371ad944 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollateTPUEmbeddingMemory.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollateTPUEmbeddingMemory.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollateTPUEmbeddingMemory" input_arg { name: "memory_configs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveAllToAllV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveAllToAllV2.pbtxt index db1520d1af3f19..85e76487c019f1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveAllToAllV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveAllToAllV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveAllToAllV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveAllToAllV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveAllToAllV3.pbtxt index ce690300d651bb..8e08f0bbbf21bd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveAllToAllV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveAllToAllV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveAllToAllV3" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveAssignGroupV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveAssignGroupV2.pbtxt index 888884e1dd1a87..891eab6162c961 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveAssignGroupV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveAssignGroupV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveAssignGroupV2" input_arg { name: "group_assignment" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastRecv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastRecv.pbtxt index aee2ceeb9dd728..bb2f18d7fd2637 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastRecv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastRecv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveBcastRecv" output_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastRecvV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastRecvV2.pbtxt index 7da7797030f61b..35a48eec2582f2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastRecvV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastRecvV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveBcastRecvV2" input_arg { name: "group_size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastSend.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastSend.pbtxt index 5ac9ecf25b7da5..50719b2f193899 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastSend.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastSend.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveBcastSend" input_arg { name: "input" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastSendV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastSendV2.pbtxt index f5f058fca6ec59..3fa3a7d269d76b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastSendV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveBcastSendV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveBcastSendV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveGather.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveGather.pbtxt index 1c5a520a41daf3..7a4610a33f6072 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveGather.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveGather.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveGather" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveGatherV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveGatherV2.pbtxt index 3bd706437fafed..d85b0612c3dddb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveGatherV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveGatherV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveGatherV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveInitializeCommunicator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveInitializeCommunicator.pbtxt index 4ac5ab48cdba99..9b9340a118a35e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveInitializeCommunicator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveInitializeCommunicator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveInitializeCommunicator" input_arg { name: "group_key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectivePermute.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectivePermute.pbtxt index 9a4ba13ea1917b..b3d33d2447e8e7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectivePermute.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectivePermute.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectivePermute" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduce.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduce.pbtxt index 303ce3ebd83abd..a93a8505dd3209 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduce.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduce.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveReduce" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceScatterV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceScatterV2.pbtxt index 49551a88bcf242..f64772c2d270f8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceScatterV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceScatterV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveReduceScatterV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceV2.pbtxt index 37e60303ec3657..b42db7ed9ae53c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveReduceV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceV3.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceV3.pbtxt index 4459e5580d2264..53d1697add4c55 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CollectiveReduceV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CollectiveReduceV3" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CombinedNonMaxSuppression.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CombinedNonMaxSuppression.pbtxt index 983b747b1bac53..55e27122e9d862 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CombinedNonMaxSuppression.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CombinedNonMaxSuppression.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CombinedNonMaxSuppression" input_arg { name: "boxes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Complex.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Complex.pbtxt index c22e2b8045e10a..5d17643c89473c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Complex.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Complex.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Complex" input_arg { name: "real" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ComplexAbs.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ComplexAbs.pbtxt index 60f95372d7c985..6e7cfc1266aa15 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ComplexAbs.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ComplexAbs.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ComplexAbs" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CompositeTensorVariantFromComponents.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CompositeTensorVariantFromComponents.pbtxt index f6a900fd692d55..ed0660bc02e05f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CompositeTensorVariantFromComponents.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CompositeTensorVariantFromComponents.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CompositeTensorVariantFromComponents" input_arg { name: "components" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CompositeTensorVariantToComponents.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CompositeTensorVariantToComponents.pbtxt index b877c9535bd31d..fa8c5a4d95b316 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CompositeTensorVariantToComponents.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CompositeTensorVariantToComponents.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CompositeTensorVariantToComponents" input_arg { name: "encoded" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CompressElement.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CompressElement.pbtxt index 5da73190c445c8..07d8cb461afcbe 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CompressElement.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CompressElement.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CompressElement" input_arg { name: "components" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ComputeAccidentalHits.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ComputeAccidentalHits.pbtxt index 6d8dfad02d5845..0bac269ba6b9f2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ComputeAccidentalHits.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ComputeAccidentalHits.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ComputeAccidentalHits" input_arg { name: "true_classes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ComputeBatchSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ComputeBatchSize.pbtxt index 
67dfb6ef08def0..13ab4eef4d0a61 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ComputeBatchSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ComputeBatchSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ComputeBatchSize" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataSize.pbtxt index b9f68e23d3d77b..0d4ec98a96f357 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ComputeDedupDataSize" output_arg { name: "num_elements" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataSizeV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataSizeV2.pbtxt index 7851f740a6b679..2493251c1fddc6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataSizeV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataSizeV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ComputeDedupDataSizeV2" output_arg { name: "num_elements" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataTupleMask.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataTupleMask.pbtxt index d04dc21c9546d6..ba91c01af1ba78 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataTupleMask.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataTupleMask.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ComputeDedupDataTupleMask" output_arg { name: "output_shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataTupleMaskV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataTupleMaskV2.pbtxt index f1e3e21c75ad1b..87deca2a2daecd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataTupleMaskV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ComputeDedupDataTupleMaskV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ComputeDedupDataTupleMaskV2" output_arg { name: "output_shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Concat.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Concat.pbtxt index 878098fbb5586a..21ff0fda4338f0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Concat.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Concat.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Concat" input_arg { name: "concat_dim" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConcatOffset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConcatOffset.pbtxt index a2d85962951864..f2fbb0062a1f11 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConcatOffset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConcatOffset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConcatOffset" input_arg { name: "concat_dim" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConcatV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConcatV2.pbtxt index d14b7b8d507d4e..d11dc14a9fffba 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConcatV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConcatV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConcatV2" input_arg { name: "values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConcatenateDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConcatenateDataset.pbtxt index f3d827b100dd35..3bdf420d07b14b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConcatenateDataset.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/ConcatenateDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConcatenateDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConditionalAccumulator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConditionalAccumulator.pbtxt index fbb1e42c13cb5b..678e9736ff978e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConditionalAccumulator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConditionalAccumulator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConditionalAccumulator" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConfigureAndInitializeGlobalTPU.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConfigureAndInitializeGlobalTPU.pbtxt index 0ea06f390bcb5f..96ba7c2527d732 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConfigureAndInitializeGlobalTPU.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConfigureAndInitializeGlobalTPU.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConfigureAndInitializeGlobalTPU" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConfigureDistributedTPU.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConfigureDistributedTPU.pbtxt index 790c463a57b7f2..72f944042dcaaf 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConfigureDistributedTPU.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConfigureDistributedTPU.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConfigureDistributedTPU" output_arg { name: "topology" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbedding.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbedding.pbtxt index 6949705a6a0183..6e61f8870c30b7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbedding.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbedding.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConfigureTPUEmbedding" attr { name: "config" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbeddingHost.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbeddingHost.pbtxt index e7c07085a59269..2e99447437a6e9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbeddingHost.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbeddingHost.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConfigureTPUEmbeddingHost" input_arg { name: "common_config" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbeddingMemory.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbeddingMemory.pbtxt index 138d1486a5bcaf..ab1b0b8511e4c0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbeddingMemory.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConfigureTPUEmbeddingMemory.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConfigureTPUEmbeddingMemory" input_arg { name: "common_config" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conj.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conj.pbtxt index 74a543143d071a..6e98e166726cd9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conj.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conj.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conj" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConjugateTranspose.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConjugateTranspose.pbtxt index 16047a1a25d6fd..417a2a53cd0e7d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConjugateTranspose.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/ConjugateTranspose.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConjugateTranspose" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConnectTPUEmbeddingHosts.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConnectTPUEmbeddingHosts.pbtxt index 78b2395bae806d..af1e2d5e0e596f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConnectTPUEmbeddingHosts.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConnectTPUEmbeddingHosts.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConnectTPUEmbeddingHosts" input_arg { name: "network_configs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Const.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Const.pbtxt index ea0c1c0864951d..6512d22e435f73 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Const.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Const.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Const" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConsumeMutexLock.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConsumeMutexLock.pbtxt index 3a36b864063ee6..4340267edf935b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConsumeMutexLock.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConsumeMutexLock.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConsumeMutexLock" input_arg { name: "mutex_lock" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ControlTrigger.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ControlTrigger.pbtxt index 6c77cdfebf25dd..2fe84fee80152f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ControlTrigger.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ControlTrigger.pbtxt @@ -1,3 +1,3 @@ -op { +op { name: "ControlTrigger" } diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv.pbtxt index 1651b7a2150dd7..c626db885b594f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv2D.pbtxt index 1e7916c736761c..a95e3d23aea137 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conv2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropFilter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropFilter.pbtxt index a7bb1cbfc63127..1c656127f7e76a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropFilter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropFilter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv2DBackpropFilter" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropFilterV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropFilterV2.pbtxt index c56a507acddd69..e9bf463cc70237 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropFilterV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropFilterV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv2DBackpropFilterV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropInput.pbtxt index 0c46e05bbe485e..04d32885860ea6 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv2DBackpropInput" input_arg { name: "input_sizes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropInputV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropInputV2.pbtxt index 82148176cadab3..414a06604d1d71 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropInputV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv2DBackpropInputV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv2DBackpropInputV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv3D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv3D.pbtxt index f62fda0c7c17c6..a04a66081fff37 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conv3D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv3D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv3D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropFilter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropFilter.pbtxt index 1c43d8de15d2f2..3091cfdcba7629 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropFilter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropFilter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv3DBackpropFilter" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropFilterV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropFilterV2.pbtxt index fa96e2ceea7d65..2494eba6e8a2b6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropFilterV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropFilterV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv3DBackpropFilterV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropInput.pbtxt index 0a2429303607fa..7fa3a5548d7628 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv3DBackpropInput" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropInputV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropInputV2.pbtxt index 0bdc6e2d7d3d30..e01b33dc4a87ec 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropInputV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Conv3DBackpropInputV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Conv3DBackpropInputV2" input_arg { name: "input_sizes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConvertToCooTensor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConvertToCooTensor.pbtxt index 31aeb0dfd962c4..65577c03134142 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConvertToCooTensor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConvertToCooTensor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConvertToCooTensor" input_arg { name: "indices_or_row_splits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConvertToListOfSparseCoreCooTensors.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConvertToListOfSparseCoreCooTensors.pbtxt index ad7dc09892cd22..137b4eca0acfb6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConvertToListOfSparseCoreCooTensors.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/ConvertToListOfSparseCoreCooTensors.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConvertToListOfSparseCoreCooTensors" input_arg { name: "indices_or_row_splits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ConvertToSparseCoreCsrWrappedCooTensor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ConvertToSparseCoreCsrWrappedCooTensor.pbtxt index dc53acec41aed5..defd0e3b53b1c0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ConvertToSparseCoreCsrWrappedCooTensor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ConvertToSparseCoreCsrWrappedCooTensor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ConvertToSparseCoreCsrWrappedCooTensor" input_arg { name: "sorted_row_ids_list" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Copy.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Copy.pbtxt index 693fbfe081ac3f..258aecc2947457 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Copy.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Copy.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Copy" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CopyHost.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CopyHost.pbtxt index 89d49896724450..07eb864f460c25 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CopyHost.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CopyHost.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CopyHost" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CopyToMesh.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CopyToMesh.pbtxt index 3d1b4b1bffb059..50e0a66e784a74 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CopyToMesh.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CopyToMesh.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CopyToMesh" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CopyToMeshGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CopyToMeshGrad.pbtxt index c64c8dc3790bf8..e75ffe9bc3eb37 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CopyToMeshGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CopyToMeshGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CopyToMeshGrad" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Cos.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Cos.pbtxt index 00cdee090bb89b..52b7c1e795560c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Cos.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Cos.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Cos" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Cosh.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Cosh.pbtxt index 67b5ea0e591614..7a29316305061a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Cosh.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Cosh.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Cosh" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CountUpTo.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CountUpTo.pbtxt index e9ff14eb74db60..05726df8c11e4c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CountUpTo.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CountUpTo.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CountUpTo" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CreateSummaryDbWriter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CreateSummaryDbWriter.pbtxt index 1b8760c6bc65c9..7a5f844bbb6ed8 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/CreateSummaryDbWriter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CreateSummaryDbWriter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CreateSummaryDbWriter" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CreateSummaryFileWriter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CreateSummaryFileWriter.pbtxt index b2edd27ffa5695..61106e9fc851eb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CreateSummaryFileWriter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CreateSummaryFileWriter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CreateSummaryFileWriter" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CropAndResize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CropAndResize.pbtxt index cb5c951b82f088..57b02c6638874e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CropAndResize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CropAndResize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CropAndResize" input_arg { name: "image" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CropAndResizeGradBoxes.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CropAndResizeGradBoxes.pbtxt index 6d6be12cf1146f..d3f62e3ba4cea4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CropAndResizeGradBoxes.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CropAndResizeGradBoxes.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CropAndResizeGradBoxes" input_arg { name: "grads" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CropAndResizeGradImage.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CropAndResizeGradImage.pbtxt index a3d0713d1f34ac..6ae744f428a506 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CropAndResizeGradImage.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CropAndResizeGradImage.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CropAndResizeGradImage" input_arg { name: "grads" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Cross.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Cross.pbtxt index 12ba58289127fb..b80215fcbc16ee 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Cross.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Cross.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Cross" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CrossReplicaSum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CrossReplicaSum.pbtxt index 13c1bb582fccf9..947028f2c539a9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CrossReplicaSum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CrossReplicaSum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CrossReplicaSum" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNN.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNN.pbtxt index 4a7753b4988a00..181b2c30b153d5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNN.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNN.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNN" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackprop.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackprop.pbtxt index bc95978eb0da08..fbab0e98b82484 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackprop.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackprop.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNNBackprop" input_arg { name: "input" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackpropV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackpropV2.pbtxt index c76d1583656abd..3d2b03657a5e88 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackpropV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackpropV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNNBackpropV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackpropV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackpropV3.pbtxt index b901c64dbcbd3f..19caef08548618 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackpropV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNBackpropV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNNBackpropV3" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNCanonicalToParams.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNCanonicalToParams.pbtxt index 3b46f61998c093..b8162cb8123909 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNCanonicalToParams.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNCanonicalToParams.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNNCanonicalToParams" input_arg { name: "num_layers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNCanonicalToParamsV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNCanonicalToParamsV2.pbtxt index 9c51d58cdbe96c..25b6fa3b1bb323 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNCanonicalToParamsV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNCanonicalToParamsV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNNCanonicalToParamsV2" input_arg { name: "num_layers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsSize.pbtxt index d88ab4c5499f7a..d67825a0b67366 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNNParamsSize" input_arg { name: "num_layers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsToCanonical.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsToCanonical.pbtxt index 4cce256653d512..0c4917bdf6b554 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsToCanonical.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsToCanonical.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNNParamsToCanonical" input_arg { name: "num_layers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsToCanonicalV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsToCanonicalV2.pbtxt index 70fe8004c48bdd..40c3cd58536000 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsToCanonicalV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNParamsToCanonicalV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNNParamsToCanonicalV2" input_arg { name: "num_layers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNV2.pbtxt index caf4d17262626c..fb05fea4beb8c7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNNV2" input_arg { name: "input" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNV3.pbtxt index 91535bbe7adab8..8e71b147b727af 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CudnnRNNV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CudnnRNNV3" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Cumprod.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Cumprod.pbtxt index 9ae7fcfd322abb..2294de2efe238c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Cumprod.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Cumprod.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Cumprod" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Cumsum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Cumsum.pbtxt index a5817e2d44679b..4bf125962944d0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Cumsum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Cumsum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Cumsum" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/CumulativeLogsumexp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/CumulativeLogsumexp.pbtxt index e6e47e700f278f..21d25d772f478e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/CumulativeLogsumexp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/CumulativeLogsumexp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "CumulativeLogsumexp" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DTensorRestoreV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DTensorRestoreV2.pbtxt index 8b96f73f15b9b1..3e082064faff93 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DTensorRestoreV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DTensorRestoreV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DTensorRestoreV2" input_arg { name: "prefix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DTensorSetGlobalTPUArray.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DTensorSetGlobalTPUArray.pbtxt index f466f7f4a0f78a..b7cf1cfa9c5904 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DTensorSetGlobalTPUArray.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DTensorSetGlobalTPUArray.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DTensorSetGlobalTPUArray" input_arg { name: "topology" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DataFormatDimMap.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DataFormatDimMap.pbtxt index 0550a275713d1d..a01806bad6afa3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DataFormatDimMap.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DataFormatDimMap.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DataFormatDimMap" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DataFormatVecPermute.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DataFormatVecPermute.pbtxt index 1304fbbc4bacd8..e439414d6d3503 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DataFormatVecPermute.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DataFormatVecPermute.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DataFormatVecPermute" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DataServiceDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DataServiceDataset.pbtxt index f15d2af6c6b9b9..449ae514cbb89c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DataServiceDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DataServiceDataset.pbtxt @@ -1,4 +1,4 
@@ -op { +op { name: "DataServiceDataset" input_arg { name: "dataset_id" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV2.pbtxt index cc213b8a2c0f84..1b3547e62b6ad8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DataServiceDatasetV2" input_arg { name: "dataset_id" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV3.pbtxt index 8860baa354c08b..0151a0ed46c148 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DataServiceDatasetV3" input_arg { name: "dataset_id" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV4.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV4.pbtxt index ca1f1709f98197..1ea88a869b037c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV4.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DataServiceDatasetV4.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DataServiceDatasetV4" input_arg { name: "dataset_id" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DatasetCardinality.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DatasetCardinality.pbtxt index 8e6e5f85f83e4d..435bf6774e03cd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DatasetCardinality.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DatasetCardinality.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DatasetCardinality" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DatasetFingerprint.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DatasetFingerprint.pbtxt index 11fcf992a5dd7b..db1c39a335720d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DatasetFingerprint.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DatasetFingerprint.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DatasetFingerprint" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DatasetFromGraph.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DatasetFromGraph.pbtxt index b7afa744de1fb4..54c8ed8b014816 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DatasetFromGraph.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DatasetFromGraph.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DatasetFromGraph" input_arg { name: "graph_def" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DatasetToGraph.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DatasetToGraph.pbtxt index 1cf25d1f8bf874..5bc711b6b66080 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DatasetToGraph.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DatasetToGraph.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DatasetToGraph" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DatasetToGraphV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DatasetToGraphV2.pbtxt index 8fd09dfe6ba036..9f0500c8bdf214 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DatasetToGraphV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DatasetToGraphV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DatasetToGraphV2" input_arg { name: "input_dataset" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/DatasetToSingleElement.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DatasetToSingleElement.pbtxt index 1531425688c05b..8633fe65f4dcc1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DatasetToSingleElement.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DatasetToSingleElement.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DatasetToSingleElement" input_arg { name: "dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DatasetToTFRecord.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DatasetToTFRecord.pbtxt index e6547a87e970ec..e13f88e1c25d3b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DatasetToTFRecord.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DatasetToTFRecord.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DatasetToTFRecord" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Dawsn.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Dawsn.pbtxt index 7b69d80753e825..a2f8cba9d58b43 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Dawsn.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Dawsn.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Dawsn" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DebugGradientIdentity.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DebugGradientIdentity.pbtxt index 9c94f17ae44f02..e1b425730d8cd4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DebugGradientIdentity.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DebugGradientIdentity.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DebugGradientIdentity" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DebugGradientRefIdentity.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DebugGradientRefIdentity.pbtxt index 1cd87c9d5335b1..f75b7784ac0c05 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DebugGradientRefIdentity.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DebugGradientRefIdentity.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DebugGradientRefIdentity" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DebugIdentity.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DebugIdentity.pbtxt index f971fb59d398e2..50f97d847d4f3d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DebugIdentity.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DebugIdentity.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DebugIdentity" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DebugIdentityV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DebugIdentityV2.pbtxt index c6a523b2038bd3..ea92aaa4943cb1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DebugIdentityV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DebugIdentityV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DebugIdentityV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DebugIdentityV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DebugIdentityV3.pbtxt index ddcfd4f6a874a0..73d76b888bc0d8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DebugIdentityV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DebugIdentityV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DebugIdentityV3" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DebugNanCount.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DebugNanCount.pbtxt index cf635ac53aa6e1..82ae073497fd90 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DebugNanCount.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/DebugNanCount.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DebugNanCount" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DebugNumericSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DebugNumericSummary.pbtxt index 965ef30ab24a90..d108b54e1cdea9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DebugNumericSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DebugNumericSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DebugNumericSummary" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DebugNumericSummaryV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DebugNumericSummaryV2.pbtxt index f58db91ce09782..58c11f58af0bf5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DebugNumericSummaryV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DebugNumericSummaryV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DebugNumericSummaryV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeAndCropJpeg.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeAndCropJpeg.pbtxt index ee8f8536e500b8..d1d767f4714b32 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeAndCropJpeg.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeAndCropJpeg.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeAndCropJpeg" input_arg { name: "contents" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeBase64.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeBase64.pbtxt index 700152f16eccb5..cc7d61ce119cf4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeBase64.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeBase64.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeBase64" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeBmp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeBmp.pbtxt index d8ab837cc10e35..40ac5f0b20bd00 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeBmp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeBmp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeBmp" input_arg { name: "contents" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeCSV.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeCSV.pbtxt index e60342992df29d..f4fee2a95a5ff3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeCSV.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeCSV.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeCSV" input_arg { name: "records" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeCompressed.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeCompressed.pbtxt index ec0e15b92b74ac..8a345fffa8f409 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeCompressed.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeCompressed.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeCompressed" input_arg { name: "bytes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeGif.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeGif.pbtxt index c0e732cd5f5d8d..89b21b376e4035 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeGif.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeGif.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeGif" input_arg { name: "contents" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeImage.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeImage.pbtxt index 6771daae228047..066ffd1091d0ee 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/DecodeImage.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeImage.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeImage" input_arg { name: "contents" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeJSONExample.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeJSONExample.pbtxt index 348bf9b1cda17a..ec37ae546c5b7d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeJSONExample.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeJSONExample.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeJSONExample" input_arg { name: "json_examples" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeJpeg.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeJpeg.pbtxt index ad09dea051a347..9a4b4e4443c530 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeJpeg.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeJpeg.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeJpeg" input_arg { name: "contents" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodePaddedRaw.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodePaddedRaw.pbtxt index d0bd867a9c3893..d4c3dab8607e8a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodePaddedRaw.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodePaddedRaw.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodePaddedRaw" input_arg { name: "input_bytes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodePng.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodePng.pbtxt index 68c77685db7118..dd7bd024365408 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodePng.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodePng.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodePng" input_arg { name: "contents" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeProtoV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeProtoV2.pbtxt index e6a52119199998..ae72d29d4e72f9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeProtoV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeProtoV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeProtoV2" input_arg { name: "bytes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeRaw.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeRaw.pbtxt index 5a3d5a475ffce7..bc8ce80563a202 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeRaw.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeRaw.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeRaw" input_arg { name: "bytes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DecodeWav.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DecodeWav.pbtxt index 698e67694d1224..8eba7b95ad01f0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DecodeWav.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DecodeWav.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DecodeWav" input_arg { name: "contents" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeepCopy.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeepCopy.pbtxt index 197842671a3467..e673960be5dc04 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DeepCopy.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeepCopy.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeepCopy" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeleteIterator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeleteIterator.pbtxt index afd3cf0c3fdc15..3050ea922611dd 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/DeleteIterator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeleteIterator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeleteIterator" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeleteMemoryCache.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeleteMemoryCache.pbtxt index 21de51004ebdda..821293ba6a7587 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DeleteMemoryCache.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeleteMemoryCache.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeleteMemoryCache" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeleteMultiDeviceIterator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeleteMultiDeviceIterator.pbtxt index 4bba829418c3e4..b4ae640cec2abc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DeleteMultiDeviceIterator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeleteMultiDeviceIterator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeleteMultiDeviceIterator" input_arg { name: "multi_device_iterator" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeleteRandomSeedGenerator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeleteRandomSeedGenerator.pbtxt index 9272e55a0d7b63..0c0d2d1057422b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DeleteRandomSeedGenerator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeleteRandomSeedGenerator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeleteRandomSeedGenerator" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeleteSeedGenerator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeleteSeedGenerator.pbtxt index 125eae81544aa3..e588b1e9e7e85e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DeleteSeedGenerator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeleteSeedGenerator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeleteSeedGenerator" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeleteSessionTensor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeleteSessionTensor.pbtxt index 6b4c55134baf66..def4c105535d95 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DeleteSessionTensor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeleteSessionTensor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeleteSessionTensor" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DenseBincount.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DenseBincount.pbtxt index c96abfc5ab012c..9bab6854e406a9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DenseBincount.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DenseBincount.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DenseBincount" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DenseCountSparseOutput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DenseCountSparseOutput.pbtxt index cb500a1f4c9931..be566eab9f438d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DenseCountSparseOutput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DenseCountSparseOutput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DenseCountSparseOutput" input_arg { name: "values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DenseToCSRSparseMatrix.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DenseToCSRSparseMatrix.pbtxt index 159fe06324c252..c8b2f66380791a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DenseToCSRSparseMatrix.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/DenseToCSRSparseMatrix.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DenseToCSRSparseMatrix" input_arg { name: "dense_input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DenseToDenseSetOperation.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DenseToDenseSetOperation.pbtxt index 74a3d2aaa02b5b..5188a82414ef18 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DenseToDenseSetOperation.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DenseToDenseSetOperation.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DenseToDenseSetOperation" input_arg { name: "set1" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DenseToSparseBatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DenseToSparseBatchDataset.pbtxt index 16838ea7d82424..cb972cca8aa42f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DenseToSparseBatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DenseToSparseBatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DenseToSparseBatchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DenseToSparseSetOperation.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DenseToSparseSetOperation.pbtxt index e79d8f5e213011..71c9c37798f7b1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DenseToSparseSetOperation.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DenseToSparseSetOperation.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DenseToSparseSetOperation" input_arg { name: "set1" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DepthToSpace.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DepthToSpace.pbtxt index f5dd95b60c97d0..422fe7ff53f2a4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DepthToSpace.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DepthToSpace.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DepthToSpace" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNative.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNative.pbtxt index 90eae83c303cc0..805bf2d379aa03 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNative.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNative.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DepthwiseConv2dNative" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNativeBackpropFilter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNativeBackpropFilter.pbtxt index 199b883851c5ab..119933bf0ae7de 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNativeBackpropFilter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNativeBackpropFilter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DepthwiseConv2dNativeBackpropFilter" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNativeBackpropInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNativeBackpropInput.pbtxt index 99edc181b7d635..8ad511910587f6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNativeBackpropInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DepthwiseConv2dNativeBackpropInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DepthwiseConv2dNativeBackpropInput" input_arg { name: "input_sizes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Dequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Dequantize.pbtxt index 8db5d571979aea..1a39e1869bfb90 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/Dequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Dequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Dequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeserializeIterator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeserializeIterator.pbtxt index 2d531bd93df362..1ae290e93c0f37 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DeserializeIterator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeserializeIterator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeserializeIterator" input_arg { name: "resource_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeserializeManySparse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeserializeManySparse.pbtxt index 0ebb833a4b6383..f0e75d96d94d7e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DeserializeManySparse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeserializeManySparse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeserializeManySparse" input_arg { name: "serialized_sparse" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeserializeSparse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeserializeSparse.pbtxt index c327f01280ec25..c23a9b58a62ed6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DeserializeSparse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeserializeSparse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeserializeSparse" input_arg { name: "serialized_sparse" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DestroyResourceOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DestroyResourceOp.pbtxt index 355227a484d32b..aa16c5ad5235a5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DestroyResourceOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DestroyResourceOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DestroyResourceOp" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DestroyTemporaryVariable.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DestroyTemporaryVariable.pbtxt index 773e399783efe7..7e073b2f20b040 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DestroyTemporaryVariable.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DestroyTemporaryVariable.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DestroyTemporaryVariable" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DeviceIndex.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DeviceIndex.pbtxt index cfb79b60e3e44d..c513889e7b1813 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DeviceIndex.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DeviceIndex.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DeviceIndex" output_arg { name: "index" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Diag.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Diag.pbtxt index bae6dbfc7d2e76..92cb2071cf9788 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Diag.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Diag.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Diag" input_arg { name: "diagonal" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DiagPart.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DiagPart.pbtxt index 00e64aa39669e2..aec8c871407ccb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DiagPart.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DiagPart.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DiagPart" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Digamma.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/Digamma.pbtxt index d29ad8723b06de..0c294e54f216ca 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Digamma.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Digamma.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Digamma" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Dilation2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Dilation2D.pbtxt index 30f06dc42062a5..1db8503014ae72 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Dilation2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Dilation2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Dilation2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Dilation2DBackpropFilter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Dilation2DBackpropFilter.pbtxt index 4dcc4b6f248e45..5a5a9f1dbb39cd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Dilation2DBackpropFilter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Dilation2DBackpropFilter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Dilation2DBackpropFilter" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Dilation2DBackpropInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Dilation2DBackpropInput.pbtxt index dc2e964457a8bf..8944211d86d45a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Dilation2DBackpropInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Dilation2DBackpropInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Dilation2DBackpropInput" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DirectedInterleaveDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DirectedInterleaveDataset.pbtxt index 9aaadbf0a6c014..61a9462fac3b59 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DirectedInterleaveDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DirectedInterleaveDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DirectedInterleaveDataset" input_arg { name: "selector_input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DisableCopyOnRead.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DisableCopyOnRead.pbtxt index 8a91880bab9c5b..61d189ae526cfa 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DisableCopyOnRead.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DisableCopyOnRead.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DisableCopyOnRead" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DistributedSave.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DistributedSave.pbtxt index 3cb284d442a94e..221820fddb1962 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DistributedSave.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DistributedSave.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DistributedSave" input_arg { name: "dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Div.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Div.pbtxt index 3ec0077535986c..fdc955f19ebe17 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Div.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Div.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Div" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DivNoNan.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DivNoNan.pbtxt index 40fc208137bcbd..ee9025f52f25a7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DivNoNan.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DivNoNan.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DivNoNan" 
input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DrawBoundingBoxes.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DrawBoundingBoxes.pbtxt index 35952f3512e107..729817314e9c39 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DrawBoundingBoxes.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DrawBoundingBoxes.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DrawBoundingBoxes" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DrawBoundingBoxesV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DrawBoundingBoxesV2.pbtxt index 85ab33cee4f34c..0a561796ca0b61 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DrawBoundingBoxesV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DrawBoundingBoxesV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DrawBoundingBoxesV2" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DummyIterationCounter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DummyIterationCounter.pbtxt index 2f6bf602b173d3..b1df20cae731ca 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DummyIterationCounter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DummyIterationCounter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DummyIterationCounter" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DummyMemoryCache.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DummyMemoryCache.pbtxt index bbce6cafdc3958..63901e2585ea15 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DummyMemoryCache.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DummyMemoryCache.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DummyMemoryCache" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DummySeedGenerator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DummySeedGenerator.pbtxt index f08c0e07723d46..585bc7c7528344 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DummySeedGenerator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DummySeedGenerator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DummySeedGenerator" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt index 5ac83512996ed4..b494ece1a77218 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch" input_arg { name: "sample_indices_or_row_lengths" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DynamicEnqueueTPUEmbeddingRaggedTensorBatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DynamicEnqueueTPUEmbeddingRaggedTensorBatch.pbtxt index 506a023aa23583..46adf791fe5977 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DynamicEnqueueTPUEmbeddingRaggedTensorBatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DynamicEnqueueTPUEmbeddingRaggedTensorBatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DynamicEnqueueTPUEmbeddingRaggedTensorBatch" input_arg { name: "sample_splits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DynamicPartition.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DynamicPartition.pbtxt index c497964b636bc3..3565bd6f754540 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DynamicPartition.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/DynamicPartition.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DynamicPartition" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/DynamicStitch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/DynamicStitch.pbtxt index 76226f65312d0a..aba8346995ccd3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/DynamicStitch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/DynamicStitch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "DynamicStitch" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EagerPyFunc.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EagerPyFunc.pbtxt index 302e420778b6f0..56c12e3845c6ff 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EagerPyFunc.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EagerPyFunc.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EagerPyFunc" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EditDistance.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EditDistance.pbtxt index 78fbc3f1ac4d9e..aba098b7020cdb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EditDistance.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EditDistance.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EditDistance" input_arg { name: "hypothesis_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Eig.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Eig.pbtxt index 3872e50f11ed76..d95892a88367df 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Eig.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Eig.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Eig" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Einsum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Einsum.pbtxt index 8a365bb22bf4d9..3855daa079bd40 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Einsum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Einsum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Einsum" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Elu.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Elu.pbtxt index 3c9ea1633638b9..4b8a81527569a9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Elu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Elu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Elu" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EluGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EluGrad.pbtxt index 2b2e8b974c18f7..cfbc9f99e314f3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EluGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EluGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EluGrad" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Empty.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Empty.pbtxt index 760cab96732ccc..147854bb88cf23 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Empty.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Empty.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Empty" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EmptyTensorList.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EmptyTensorList.pbtxt index 46015e651e219f..d15fa1ad47048c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EmptyTensorList.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EmptyTensorList.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EmptyTensorList" input_arg { name: "element_shape" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/EmptyTensorMap.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EmptyTensorMap.pbtxt index ee1fc166627b41..25327b4e1e8bfb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EmptyTensorMap.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EmptyTensorMap.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EmptyTensorMap" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EncodeBase64.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EncodeBase64.pbtxt index 001c3d0d9fb36d..6e5241d0fd7033 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EncodeBase64.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EncodeBase64.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EncodeBase64" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EncodeJpeg.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EncodeJpeg.pbtxt index 165a02aed32de1..9f3c3453e33e32 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EncodeJpeg.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EncodeJpeg.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EncodeJpeg" input_arg { name: "image" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EncodeJpegVariableQuality.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EncodeJpegVariableQuality.pbtxt index 97f93d28b288f0..94c41ea4e5d574 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EncodeJpegVariableQuality.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EncodeJpegVariableQuality.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EncodeJpegVariableQuality" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EncodePng.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EncodePng.pbtxt index 7b7d161e8cba6e..7d2cbd85225125 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EncodePng.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EncodePng.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EncodePng" input_arg { name: "image" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EncodeProto.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EncodeProto.pbtxt index 2cea90e4183ddf..e619618946a6c0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EncodeProto.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EncodeProto.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EncodeProto" input_arg { name: "sizes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EncodeWav.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EncodeWav.pbtxt index e468d229ff4024..b013362a47d9e4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EncodeWav.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EncodeWav.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EncodeWav" input_arg { name: "audio" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt index f8d3f09b3cd32a..efb854a44baf5e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EnqueueTPUEmbeddingArbitraryTensorBatch" input_arg { name: "sample_indices_or_row_lengths" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingBatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingBatch.pbtxt index b1be42b17eed44..a09c5d87f44fda 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingBatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingBatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EnqueueTPUEmbeddingBatch" input_arg { name: "batch" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingIntegerBatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingIntegerBatch.pbtxt index 37b123d9791a2a..26d63b6e49dc5e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingIntegerBatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingIntegerBatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EnqueueTPUEmbeddingIntegerBatch" input_arg { name: "batch" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingRaggedTensorBatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingRaggedTensorBatch.pbtxt index 5e1a15cbc95fcc..327bd4fb387700 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingRaggedTensorBatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingRaggedTensorBatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EnqueueTPUEmbeddingRaggedTensorBatch" input_arg { name: "sample_splits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingSparseBatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingSparseBatch.pbtxt index 03a19c2feed572..64b8cb5178c5a6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingSparseBatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingSparseBatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EnqueueTPUEmbeddingSparseBatch" input_arg { name: "sample_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingSparseTensorBatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingSparseTensorBatch.pbtxt index 19521bb9213393..ab1c9d264eca1e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingSparseTensorBatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EnqueueTPUEmbeddingSparseTensorBatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EnqueueTPUEmbeddingSparseTensorBatch" input_arg { name: "sample_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EnsureShape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EnsureShape.pbtxt index 336e2aed15cff6..24fa5589131df3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EnsureShape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EnsureShape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EnsureShape" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Enter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Enter.pbtxt index d0a555da70c264..d39d15f34dbf6f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Enter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Enter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Enter" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Equal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Equal.pbtxt index 8ad87805a6c558..a50cbdfcfeb1ac 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Equal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Equal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Equal" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Erf.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Erf.pbtxt index a53d8e380ef17c..680b736fa3e4a3 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/Erf.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Erf.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Erf" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Erfc.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Erfc.pbtxt index 589ca5a917c652..2fcfc68f04fbf2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Erfc.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Erfc.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Erfc" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Erfinv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Erfinv.pbtxt index 3b51f3b83c9d3b..78443dada86bc4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Erfinv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Erfinv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Erfinv" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/EuclideanNorm.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/EuclideanNorm.pbtxt index b4916ba2c7ee8c..b88f521b57199f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/EuclideanNorm.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/EuclideanNorm.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "EuclideanNorm" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExecuteTPUEmbeddingPartitioner.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExecuteTPUEmbeddingPartitioner.pbtxt index c2a3d248d1f844..37025570305f98 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExecuteTPUEmbeddingPartitioner.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExecuteTPUEmbeddingPartitioner.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExecuteTPUEmbeddingPartitioner" output_arg { name: "common_config" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Exit.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Exit.pbtxt index a722f2b7d5ced9..56a1371bec6b3f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Exit.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Exit.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Exit" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Exp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Exp.pbtxt index a237e834372291..7afeb677a29a54 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Exp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Exp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Exp" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExpandDims.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExpandDims.pbtxt index 9bd506742ce3d7..c7bb353162c8c8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExpandDims.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExpandDims.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExpandDims" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalAssertNextDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalAssertNextDataset.pbtxt index d33df8039e8b66..937afb4e0ae48f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalAssertNextDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalAssertNextDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalAssertNextDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalAutoShardDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalAutoShardDataset.pbtxt index 7a609f0300a365..d8d8d7ffc96e4e 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalAutoShardDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalAutoShardDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalAutoShardDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalBytesProducedStatsDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalBytesProducedStatsDataset.pbtxt index 509dd811947653..10555bb3ebfbf5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalBytesProducedStatsDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalBytesProducedStatsDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalBytesProducedStatsDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalCSVDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalCSVDataset.pbtxt index 85d8950a845328..a618d55fcb6289 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalCSVDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalCSVDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalCSVDataset" input_arg { name: "filenames" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalChooseFastestDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalChooseFastestDataset.pbtxt index 7d52752fffe21d..2c04d58db4c0a4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalChooseFastestDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalChooseFastestDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalChooseFastestDataset" input_arg { name: "input_datasets" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDatasetCardinality.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDatasetCardinality.pbtxt index d0e7b0934d0dd6..f6ba3657864e1a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDatasetCardinality.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDatasetCardinality.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalDatasetCardinality" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDatasetToTFRecord.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDatasetToTFRecord.pbtxt index 76b1f8c1d0036f..0d0e46c8b39ade 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDatasetToTFRecord.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDatasetToTFRecord.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalDatasetToTFRecord" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDenseToSparseBatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDenseToSparseBatchDataset.pbtxt index 94afefd024b797..c322ef95777609 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDenseToSparseBatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDenseToSparseBatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalDenseToSparseBatchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDirectedInterleaveDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDirectedInterleaveDataset.pbtxt index c2214754670c61..2a877497ff29fd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDirectedInterleaveDataset.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalDirectedInterleaveDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalDirectedInterleaveDataset" input_arg { name: "selector_input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalGroupByReducerDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalGroupByReducerDataset.pbtxt index 94d88c86c5fd0c..5e2fd15a22c908 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalGroupByReducerDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalGroupByReducerDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalGroupByReducerDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalGroupByWindowDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalGroupByWindowDataset.pbtxt index 25ede182ce533a..35f9c3c1a9547d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalGroupByWindowDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalGroupByWindowDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalGroupByWindowDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalIgnoreErrorsDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalIgnoreErrorsDataset.pbtxt index 5180b62d1e57b5..8fc4e7d2c8adee 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalIgnoreErrorsDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalIgnoreErrorsDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalIgnoreErrorsDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalIteratorGetDevice.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalIteratorGetDevice.pbtxt index d65b5f8bb6c4a0..8e1e10240f9ea7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalIteratorGetDevice.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalIteratorGetDevice.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalIteratorGetDevice" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalLMDBDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalLMDBDataset.pbtxt index 05c9db9bd03516..e8b8694a947d95 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalLMDBDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalLMDBDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalLMDBDataset" input_arg { name: "filenames" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalLatencyStatsDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalLatencyStatsDataset.pbtxt index aa401c14384320..29ba38a500c2b7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalLatencyStatsDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalLatencyStatsDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalLatencyStatsDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMapAndBatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMapAndBatchDataset.pbtxt index 519fa71185c471..7799116408c237 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMapAndBatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMapAndBatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: 
"ExperimentalMapAndBatchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMapDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMapDataset.pbtxt index 75fef0d63ddb9f..401331b716073d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMapDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMapDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalMapDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMatchingFilesDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMatchingFilesDataset.pbtxt index 29a30d9b257a07..45ef522bf7fdb4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMatchingFilesDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMatchingFilesDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalMatchingFilesDataset" input_arg { name: "patterns" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMaxIntraOpParallelismDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMaxIntraOpParallelismDataset.pbtxt index 004b1dd4f9067d..109f3906b31852 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMaxIntraOpParallelismDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalMaxIntraOpParallelismDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalMaxIntraOpParallelismDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalNonSerializableDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalNonSerializableDataset.pbtxt index 72b5a2ef5b2d32..b0c45ac19941ce 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalNonSerializableDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalNonSerializableDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalNonSerializableDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalParallelInterleaveDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalParallelInterleaveDataset.pbtxt index 5d9a463a93ab4f..a90031c9dad06a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalParallelInterleaveDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalParallelInterleaveDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalParallelInterleaveDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalParseExampleDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalParseExampleDataset.pbtxt index 3e150e33913691..44701f67286318 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalParseExampleDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalParseExampleDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalParseExampleDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalPrivateThreadPoolDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalPrivateThreadPoolDataset.pbtxt index e81bde0383e847..3098c30ba69d11 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalPrivateThreadPoolDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalPrivateThreadPoolDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalPrivateThreadPoolDataset" 
input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalRandomDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalRandomDataset.pbtxt index 04d661ee71297c..c3276a46df5b41 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalRandomDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalRandomDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalRandomDataset" input_arg { name: "seed" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalRebatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalRebatchDataset.pbtxt index 2656cc09900abd..2c024741713d48 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalRebatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalRebatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalRebatchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalScanDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalScanDataset.pbtxt index cc4fc8c0e9c249..39d42061ef58d6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalScanDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalScanDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalScanDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSetStatsAggregatorDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSetStatsAggregatorDataset.pbtxt index 4a6d59b28c2e80..291597bf11b8dc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSetStatsAggregatorDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSetStatsAggregatorDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalSetStatsAggregatorDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSleepDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSleepDataset.pbtxt index 06dbf87163fdae..806d764d9cdf12 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSleepDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSleepDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalSleepDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSlidingWindowDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSlidingWindowDataset.pbtxt index 6d81c4c97f833b..ab18f4e214c578 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSlidingWindowDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSlidingWindowDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalSlidingWindowDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSqlDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSqlDataset.pbtxt index 634a65912fa64a..f56ce488df0aba 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSqlDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalSqlDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalSqlDataset" input_arg { name: "driver_name" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalStatsAggregatorHandle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalStatsAggregatorHandle.pbtxt index a3493ec933b7c0..b00cadbca09498 
100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalStatsAggregatorHandle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalStatsAggregatorHandle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalStatsAggregatorHandle" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalStatsAggregatorSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalStatsAggregatorSummary.pbtxt index 87f63b282fe5f9..7886f7a6cb305e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalStatsAggregatorSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalStatsAggregatorSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalStatsAggregatorSummary" input_arg { name: "iterator" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalTakeWhileDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalTakeWhileDataset.pbtxt index db64c7eb605f44..7c9b4f86adbbe4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalTakeWhileDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalTakeWhileDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalTakeWhileDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalThreadPoolDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalThreadPoolDataset.pbtxt index dc50cd1b975c71..da23c415fd24f0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalThreadPoolDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalThreadPoolDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalThreadPoolDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalThreadPoolHandle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalThreadPoolHandle.pbtxt index 47dc873ee476fb..8b230f90470f29 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalThreadPoolHandle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalThreadPoolHandle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalThreadPoolHandle" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalUnbatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalUnbatchDataset.pbtxt index 42819a367ee74e..83f3a39f5e9244 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalUnbatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalUnbatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalUnbatchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalUniqueDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalUniqueDataset.pbtxt index 4aa39a71b2d8bc..95668c930d7269 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExperimentalUniqueDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExperimentalUniqueDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExperimentalUniqueDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Expint.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Expint.pbtxt index afb722b09ac2c3..3080bf15de9170 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Expint.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Expint.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Expint" input_arg { name: "x" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/Expm1.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Expm1.pbtxt index 6ea265efc65ffe..b09aac454d000e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Expm1.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Expm1.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Expm1" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExtractGlimpse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExtractGlimpse.pbtxt index 1b305eb38867d2..597a77a3f3b8fa 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExtractGlimpse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExtractGlimpse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExtractGlimpse" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExtractGlimpseV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExtractGlimpseV2.pbtxt index 8dababaccb9423..08725f4504ce01 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExtractGlimpseV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExtractGlimpseV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExtractGlimpseV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExtractImagePatches.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExtractImagePatches.pbtxt index ada7acbbf57863..dee8034d6c7076 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExtractImagePatches.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExtractImagePatches.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExtractImagePatches" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExtractJpegShape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExtractJpegShape.pbtxt index ef8068e1e91f00..ac3d34ca234fea 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExtractJpegShape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExtractJpegShape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExtractJpegShape" input_arg { name: "contents" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ExtractVolumePatches.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ExtractVolumePatches.pbtxt index 96d79b25ea52eb..09cc21a38b47e6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ExtractVolumePatches.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ExtractVolumePatches.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ExtractVolumePatches" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FFT.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FFT.pbtxt index 1bc74e426a247f..e986f323936f51 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FFT.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FFT.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FFT" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FFT2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FFT2D.pbtxt index 38c837f8e91a87..adb1c253867f22 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FFT2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FFT2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FFT2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FFT3D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FFT3D.pbtxt index df20d1970b2bd3..9266d6db4a688f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FFT3D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FFT3D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FFT3D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FFTND.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/FFTND.pbtxt index 1cb40fa334b2f2..20afcc3c5b466d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FFTND.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FFTND.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FFTND" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FIFOQueue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FIFOQueue.pbtxt index d3d57195e72efd..c3321a8c6e4782 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FIFOQueue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FIFOQueue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FIFOQueue" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FIFOQueueV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FIFOQueueV2.pbtxt index 6f9c48db9ebcb7..9b1c8404d0ddac 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FIFOQueueV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FIFOQueueV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FIFOQueueV2" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Fact.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Fact.pbtxt index 426124e73678e1..90a0ad8dd00112 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Fact.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Fact.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Fact" output_arg { name: "fact" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FakeParam.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FakeParam.pbtxt index 7b286c4697e1d3..dc2a7c5ea46991 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FakeParam.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FakeParam.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FakeParam" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxArgs.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxArgs.pbtxt index 5faa52d9136ca7..2d8eac83c59be4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxArgs.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxArgs.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FakeQuantWithMinMaxArgs" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxArgsGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxArgsGradient.pbtxt index 02850fbbc9b4d1..5d02f59da1e444 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxArgsGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxArgsGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FakeQuantWithMinMaxArgsGradient" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVars.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVars.pbtxt index 8fc979749d448a..233f5cc2f66134 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVars.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVars.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FakeQuantWithMinMaxVars" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsGradient.pbtxt index 595d2688c7b99d..cf8ed6f8b7e18d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsGradient.pbtxt @@ 
-1,4 +1,4 @@ -op { +op { name: "FakeQuantWithMinMaxVarsGradient" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsPerChannel.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsPerChannel.pbtxt index 7300a1daf33c8a..551ae79cd94ce9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsPerChannel.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsPerChannel.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FakeQuantWithMinMaxVarsPerChannel" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt index 80e037e94e46f3..a787e251c60a6e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FakeQuantWithMinMaxVarsPerChannelGradient" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FakeQueue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FakeQueue.pbtxt index 1a45d2509e62a4..5e4cb62d941eb9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FakeQueue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FakeQueue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FakeQueue" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FileSystemSetConfiguration.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FileSystemSetConfiguration.pbtxt index 06b193366fea68..95d26fc09818f5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FileSystemSetConfiguration.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FileSystemSetConfiguration.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FileSystemSetConfiguration" input_arg { name: "scheme" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Fill.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Fill.pbtxt index e0859caefb7b41..543ae42239b4c3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Fill.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Fill.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Fill" input_arg { name: "dims" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FilterByLastComponentDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FilterByLastComponentDataset.pbtxt index 804e6f5dadc754..cf9bbc586524a9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FilterByLastComponentDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FilterByLastComponentDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FilterByLastComponentDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FilterDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FilterDataset.pbtxt index 193fa2eb091518..aad48d7aed4f62 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FilterDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FilterDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FilterDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FinalizeDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FinalizeDataset.pbtxt index cc8c78b502d528..38e49288d662e1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FinalizeDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FinalizeDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { 
name: "FinalizeDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FinalizeTPUEmbedding.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FinalizeTPUEmbedding.pbtxt index 00bc279a92250a..bd367fcdd12451 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FinalizeTPUEmbedding.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FinalizeTPUEmbedding.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FinalizeTPUEmbedding" input_arg { name: "common_config" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FinalizeTPUEmbeddingV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FinalizeTPUEmbeddingV2.pbtxt index fd57e3726f6040..63c69eaff3aaba 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FinalizeTPUEmbeddingV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FinalizeTPUEmbeddingV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FinalizeTPUEmbeddingV2" input_arg { name: "common_config" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Fingerprint.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Fingerprint.pbtxt index b00a23b0f22f8c..3a5585701ba722 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Fingerprint.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Fingerprint.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Fingerprint" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordDataset.pbtxt index 04aff9c6ec69fa..63522d5f47c331 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FixedLengthRecordDataset" input_arg { name: "filenames" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordDatasetV2.pbtxt index e27a084317ac5a..88d744513572c6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FixedLengthRecordDatasetV2" input_arg { name: "filenames" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordReader.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordReader.pbtxt index 9f3aeb4fa8f374..75b6018f249c13 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordReader.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordReader.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FixedLengthRecordReader" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordReaderV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordReaderV2.pbtxt index 1bd086e2d92964..b16e5225240580 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordReaderV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FixedLengthRecordReaderV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FixedLengthRecordReaderV2" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FixedUnigramCandidateSampler.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FixedUnigramCandidateSampler.pbtxt index 264c48f683c7ab..a7911344886c97 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FixedUnigramCandidateSampler.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FixedUnigramCandidateSampler.pbtxt @@ -1,4 
+1,4 @@ -op { +op { name: "FixedUnigramCandidateSampler" input_arg { name: "true_classes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FlatMapDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FlatMapDataset.pbtxt index f0515e584c7b92..dcf1a7ae71c41f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FlatMapDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FlatMapDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FlatMapDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Floor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Floor.pbtxt index 0653fc9864ca3f..27e405e22de85e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Floor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Floor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Floor" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FloorDiv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FloorDiv.pbtxt index 5b6aa1c29f34e1..dcaff127c7d9d0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FloorDiv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FloorDiv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FloorDiv" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FloorMod.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FloorMod.pbtxt index 2f912df3b49de2..bbf48e9b826570 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FloorMod.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FloorMod.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FloorMod" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FlushSummaryWriter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FlushSummaryWriter.pbtxt index c7c659b79f6d5c..f928d4abe99a55 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FlushSummaryWriter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FlushSummaryWriter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FlushSummaryWriter" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/For.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/For.pbtxt index d0cdc427284e7f..139990f3994b84 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/For.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/For.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "For" input_arg { name: "start" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FractionalAvgPool.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FractionalAvgPool.pbtxt index d503b56852ad5d..5fc527b066de60 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FractionalAvgPool.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FractionalAvgPool.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FractionalAvgPool" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FractionalAvgPoolGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FractionalAvgPoolGrad.pbtxt index 56028951959e56..cceb2fe903ab05 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FractionalAvgPoolGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FractionalAvgPoolGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FractionalAvgPoolGrad" input_arg { name: "orig_input_tensor_shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FractionalMaxPool.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FractionalMaxPool.pbtxt index 4360d2356fc979..a11b4ef05f1bc9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FractionalMaxPool.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FractionalMaxPool.pbtxt @@ 
-1,4 +1,4 @@ -op { +op { name: "FractionalMaxPool" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FractionalMaxPoolGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FractionalMaxPoolGrad.pbtxt index 517e9117186358..711e98a5df1479 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FractionalMaxPoolGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FractionalMaxPoolGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FractionalMaxPoolGrad" input_arg { name: "orig_input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FresnelCos.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FresnelCos.pbtxt index 19d0bf180e584b..7be5bbcb2ff8d5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FresnelCos.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FresnelCos.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FresnelCos" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FresnelSin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FresnelSin.pbtxt index a05ab41d50ac8e..c8c91ba6a68b8d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FresnelSin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FresnelSin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FresnelSin" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNorm.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNorm.pbtxt index f2480179c00962..e5ac169b31ef96 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNorm.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNorm.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FusedBatchNorm" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGrad.pbtxt index 9aabfafdd18be8..bff7eecf0ce852 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FusedBatchNormGrad" input_arg { name: "y_backprop" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGradV2.pbtxt index 78e2509276d1e5..dea20af8afcc6e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FusedBatchNormGradV2" input_arg { name: "y_backprop" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGradV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGradV3.pbtxt index 2d2d3234b91df4..aa05a575bfeec0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGradV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormGradV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FusedBatchNormGradV3" input_arg { name: "y_backprop" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormV2.pbtxt index 1f8fe62eb8b782..99f482fc721d6d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FusedBatchNormV2" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormV3.pbtxt index 1afdb5c8875269..b1f608dbe3659d 100644 
--- a/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FusedBatchNormV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FusedBatchNormV3" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FusedPadConv2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FusedPadConv2D.pbtxt index b4142aea4fbbf9..1a89c018c4e059 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FusedPadConv2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FusedPadConv2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FusedPadConv2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/FusedResizeAndPadConv2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/FusedResizeAndPadConv2D.pbtxt index b5870f695e766a..cfc716fa1d695c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/FusedResizeAndPadConv2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/FusedResizeAndPadConv2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "FusedResizeAndPadConv2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GRUBlockCell.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GRUBlockCell.pbtxt index 7c4519f6d7191f..7c0dd9d5fcff40 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GRUBlockCell.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GRUBlockCell.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GRUBlockCell" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GRUBlockCellGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GRUBlockCellGrad.pbtxt index a8b3b505af37d7..723bcbd0b6f49c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GRUBlockCellGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GRUBlockCellGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GRUBlockCellGrad" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Gather.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Gather.pbtxt index 37b2ae11c7eeb4..264a8366bb82b8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Gather.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Gather.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Gather" input_arg { name: "params" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GatherNd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GatherNd.pbtxt index 78376f9baff7b4..5ec2fd92ae606e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GatherNd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GatherNd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GatherNd" input_arg { name: "params" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GatherV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GatherV2.pbtxt index 607cb850411e0d..891e82e5fb3ed6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GatherV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GatherV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GatherV2" input_arg { name: "params" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GenerateBoundingBoxProposals.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GenerateBoundingBoxProposals.pbtxt index 6cb47345d8ccb7..adbc9d4c8e00f3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GenerateBoundingBoxProposals.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GenerateBoundingBoxProposals.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GenerateBoundingBoxProposals" input_arg { name: "scores" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GenerateVocabRemapping.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/GenerateVocabRemapping.pbtxt index adb2f799c542d0..a095253dbb20b4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GenerateVocabRemapping.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GenerateVocabRemapping.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GenerateVocabRemapping" input_arg { name: "new_vocab_file" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GeneratorDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GeneratorDataset.pbtxt index b9ac804e012177..9f8da9c542648e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GeneratorDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GeneratorDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GeneratorDataset" input_arg { name: "init_func_other_args" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GetElementAtIndex.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GetElementAtIndex.pbtxt index 22b27a5530c826..82121301bd9fbb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GetElementAtIndex.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GetElementAtIndex.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GetElementAtIndex" input_arg { name: "dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GetMinibatchSplitsWithPhysicalReplica.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GetMinibatchSplitsWithPhysicalReplica.pbtxt index 764a8982073313..85bc30bf6dc098 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GetMinibatchSplitsWithPhysicalReplica.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GetMinibatchSplitsWithPhysicalReplica.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GetMinibatchSplitsWithPhysicalReplica" input_arg { name: "program_key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GetMinibatchesInCsrWithPhysicalReplica.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GetMinibatchesInCsrWithPhysicalReplica.pbtxt index 45c3b531422fc9..79632c44322921 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GetMinibatchesInCsrWithPhysicalReplica.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GetMinibatchesInCsrWithPhysicalReplica.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GetMinibatchesInCsrWithPhysicalReplica" input_arg { name: "program_key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GetOptions.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GetOptions.pbtxt index 1562e89627eddd..f8f161238d5630 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GetOptions.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GetOptions.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GetOptions" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GetSessionHandle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GetSessionHandle.pbtxt index 90e66c7bd59690..e5345ec6f0ea1f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GetSessionHandle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GetSessionHandle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GetSessionHandle" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GetSessionHandleV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GetSessionHandleV2.pbtxt index 55dcf277a9e465..60405234b168b4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GetSessionHandleV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GetSessionHandleV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GetSessionHandleV2" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GetSessionTensor.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/GetSessionTensor.pbtxt index fef3d08c0a7b9f..5c4cf8af9c55dc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GetSessionTensor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GetSessionTensor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GetSessionTensor" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GetStatsFromListOfSparseCoreCooTensors.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GetStatsFromListOfSparseCoreCooTensors.pbtxt index 8a9eb07d6259e5..f2ea7cfeb6ed88 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GetStatsFromListOfSparseCoreCooTensors.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GetStatsFromListOfSparseCoreCooTensors.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GetStatsFromListOfSparseCoreCooTensors" input_arg { name: "row_ids_list" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GetTpuTaskId.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GetTpuTaskId.pbtxt index dab64d5df42503..a5c46e67ad3e0e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GetTpuTaskId.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GetTpuTaskId.pbtxt @@ -4,5 +4,4 @@ op { name: "tpu_task_id" type: DT_INT32 } - is_stateful: false } diff --git a/tensorflow/core/ops/compat/ops_history_v2/GlobalIterId.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GlobalIterId.pbtxt index 5fa2302622c9ac..9dcfcb49a6d0cb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GlobalIterId.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GlobalIterId.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GlobalIterId" output_arg { name: "iter_id" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GlobalShuffleDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GlobalShuffleDataset.pbtxt index 131281a80ec590..0bce0beea54070 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GlobalShuffleDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GlobalShuffleDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GlobalShuffleDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Greater.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Greater.pbtxt index bbf5f46aab7d53..8860e3c0c1097c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Greater.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Greater.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Greater" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GreaterEqual.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GreaterEqual.pbtxt index db2bbd30ff2ce3..5bcdd3789c789a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GreaterEqual.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GreaterEqual.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GreaterEqual" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GroupByReducerDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GroupByReducerDataset.pbtxt index d550546eb64bd1..320e628f8aabba 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GroupByReducerDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GroupByReducerDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GroupByReducerDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GroupByWindowDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GroupByWindowDataset.pbtxt index da3bfd2542624d..0de0de53d0e7d3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GroupByWindowDataset.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/GroupByWindowDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GroupByWindowDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/GuaranteeConst.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/GuaranteeConst.pbtxt index 9ec864b74c024b..71d47e3758090e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/GuaranteeConst.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/GuaranteeConst.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "GuaranteeConst" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/HSVToRGB.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/HSVToRGB.pbtxt index 6169e00a62058a..2b209cc6547ab6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/HSVToRGB.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/HSVToRGB.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "HSVToRGB" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/HashTable.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/HashTable.pbtxt index eb3a57246ed277..83afe2b9448103 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/HashTable.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/HashTable.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "HashTable" output_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/HashTableV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/HashTableV2.pbtxt index ea6383d9f392b7..24a9bc7176d8ae 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/HashTableV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/HashTableV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "HashTableV2" output_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/HistogramFixedWidth.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/HistogramFixedWidth.pbtxt index 201df5b3467a1c..f39eabe4f72506 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/HistogramFixedWidth.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/HistogramFixedWidth.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "HistogramFixedWidth" input_arg { name: "values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/HistogramSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/HistogramSummary.pbtxt index 6b5c4c9df1303a..0c46f397972bf6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/HistogramSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/HistogramSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "HistogramSummary" input_arg { name: "tag" diff --git a/tensorflow/core/ops/compat/ops_history_v2/HostConst.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/HostConst.pbtxt index 81975b57d5e37d..6dd4c1757073fd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/HostConst.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/HostConst.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "HostConst" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IFFT.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IFFT.pbtxt index a4c712b70cdf89..8571a132950cb1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IFFT.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IFFT.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IFFT" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IFFT2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IFFT2D.pbtxt index f079604a4bec8c..0b208d46939354 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IFFT2D.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/IFFT2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IFFT2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IFFT3D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IFFT3D.pbtxt index bba5431e81392a..8b9667f882c67c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IFFT3D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IFFT3D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IFFT3D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IFFTND.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IFFTND.pbtxt index 72956229f0cde9..0dfb1fc9c51904 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IFFTND.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IFFTND.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IFFTND" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IRFFT.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IRFFT.pbtxt index 1a42324399eeb1..8ac3dfc979d8ef 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IRFFT.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IRFFT.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IRFFT" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IRFFT2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IRFFT2D.pbtxt index e348e4e8cbdf39..5d1f872d605e32 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IRFFT2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IRFFT2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IRFFT2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IRFFT3D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IRFFT3D.pbtxt index 3a6fa0da90e102..b69417ee1a7e57 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IRFFT3D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IRFFT3D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IRFFT3D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IRFFTND.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IRFFTND.pbtxt index c1d1da300962db..175092d5aa8158 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IRFFTND.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IRFFTND.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IRFFTND" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Identity.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Identity.pbtxt index 7646ed8ddb0964..f3ca3dbd24324a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Identity.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Identity.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Identity" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IdentityN.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IdentityN.pbtxt index aedd30ebafdddb..61c3b63279003c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IdentityN.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IdentityN.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IdentityN" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IdentityReader.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IdentityReader.pbtxt index 9e1aa0e11e219d..3330154b4d60ca 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IdentityReader.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IdentityReader.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IdentityReader" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IdentityReaderV2.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/IdentityReaderV2.pbtxt index 27c72561ec1364..f37e9cedab961a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IdentityReaderV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IdentityReaderV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IdentityReaderV2" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/If.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/If.pbtxt index 595f6002939acb..7ccb12afa61896 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/If.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/If.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "If" input_arg { name: "cond" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Igamma.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Igamma.pbtxt index 821fa48275501f..d356f5ec5816c6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Igamma.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Igamma.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Igamma" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IgammaGradA.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IgammaGradA.pbtxt index 739385e8c4da4f..964067de5dcb69 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IgammaGradA.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IgammaGradA.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IgammaGradA" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Igammac.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Igammac.pbtxt index 1cfa801aeeb09a..cdf44f684bbdfc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Igammac.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Igammac.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Igammac" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IgnoreErrorsDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IgnoreErrorsDataset.pbtxt index 94fed463e57f7c..32af3bb466528e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IgnoreErrorsDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IgnoreErrorsDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IgnoreErrorsDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Imag.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Imag.pbtxt index f8c3ce95533bf1..1444b0c60b05f6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Imag.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Imag.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Imag" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ImageProjectiveTransformV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ImageProjectiveTransformV2.pbtxt index bd58faf8e16445..891b8b1cb7a6cc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ImageProjectiveTransformV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ImageProjectiveTransformV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ImageProjectiveTransformV2" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ImageProjectiveTransformV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ImageProjectiveTransformV3.pbtxt index 92887b2cc18151..115ee4269e7490 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ImageProjectiveTransformV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ImageProjectiveTransformV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ImageProjectiveTransformV3" input_arg { name: "images" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/ImageSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ImageSummary.pbtxt index cbe4a0123f2eda..fafd7173195e3d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ImageSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ImageSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ImageSummary" input_arg { name: "tag" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ImmutableConst.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ImmutableConst.pbtxt index 51bb4050b9f65c..ba1180951f7083 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ImmutableConst.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ImmutableConst.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ImmutableConst" output_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ImportEvent.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ImportEvent.pbtxt index d80a4f171a4090..7be31dd0ae5844 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ImportEvent.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ImportEvent.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ImportEvent" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InTopK.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InTopK.pbtxt index 881ead0a3631b5..6acd3b62e91d28 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InTopK.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InTopK.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InTopK" input_arg { name: "predictions" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InTopKV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InTopKV2.pbtxt index 003655ca052f60..a6ca2b83a45a37 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InTopKV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InTopKV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InTopKV2" input_arg { name: "predictions" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IndexFlatMapDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IndexFlatMapDataset.pbtxt index 348e21901d1743..e28bead11f8c03 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IndexFlatMapDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IndexFlatMapDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IndexFlatMapDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InfeedDequeue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InfeedDequeue.pbtxt index 595fbefc51366c..a48d840da663f3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InfeedDequeue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InfeedDequeue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InfeedDequeue" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InfeedDequeueTuple.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InfeedDequeueTuple.pbtxt index e3c4cdc4a0f5bb..dc6ab2b0b66476 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InfeedDequeueTuple.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InfeedDequeueTuple.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InfeedDequeueTuple" output_arg { name: "outputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueue.pbtxt index f62d220e778769..759b91401e9de0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InfeedEnqueue" input_arg { name: "input" 
diff --git a/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueuePrelinearizedBuffer.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueuePrelinearizedBuffer.pbtxt index e6dfae7dcea10c..d281b700bd4aad 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueuePrelinearizedBuffer.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueuePrelinearizedBuffer.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InfeedEnqueuePrelinearizedBuffer" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueueTuple.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueueTuple.pbtxt index f1339fc123d0b6..459c5d9218fd6a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueueTuple.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InfeedEnqueueTuple.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InfeedEnqueueTuple" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InitializeTable.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InitializeTable.pbtxt index 571557348711b2..35a46a99c24987 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InitializeTable.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InitializeTable.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InitializeTable" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromDataset.pbtxt index 69dc6753d84f1a..fe0ec4d4b176c2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InitializeTableFromDataset" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromTextFile.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromTextFile.pbtxt index 08c63cbe8d4ced..77be4cadf98245 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromTextFile.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromTextFile.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InitializeTableFromTextFile" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromTextFileV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromTextFileV2.pbtxt index 69b41a04dc866e..6593434f2a6f4d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromTextFileV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InitializeTableFromTextFileV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InitializeTableFromTextFileV2" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InitializeTableV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InitializeTableV2.pbtxt index 6e7aeb5f95946e..62c565902faf69 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InitializeTableV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InitializeTableV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InitializeTableV2" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InplaceAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InplaceAdd.pbtxt index 31799cddf4fee9..7c6685770b7f3f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InplaceAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InplaceAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InplaceAdd" input_arg { name: "x" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/InplaceSub.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InplaceSub.pbtxt index 28754d0755e050..42d6c14a586c49 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InplaceSub.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InplaceSub.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InplaceSub" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InplaceUpdate.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InplaceUpdate.pbtxt index 448bff4d381669..94b7f24aecc2ca 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InplaceUpdate.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InplaceUpdate.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InplaceUpdate" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InterleaveDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InterleaveDataset.pbtxt index 9a2505d5540d19..124a84cf82fc55 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InterleaveDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InterleaveDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InterleaveDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Inv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Inv.pbtxt index 30ed71735243f7..0c191790030e8d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Inv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Inv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Inv" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InvGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InvGrad.pbtxt index 1c1d4971bf43f5..af882a90b23f58 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InvGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InvGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InvGrad" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Invert.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Invert.pbtxt index 6915b9d38fd471..cd9c81231790c4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Invert.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Invert.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Invert" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/InvertPermutation.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/InvertPermutation.pbtxt index 74c7d64b10ba81..fa028961e3220c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/InvertPermutation.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/InvertPermutation.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "InvertPermutation" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IsBoostedTreesEnsembleInitialized.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IsBoostedTreesEnsembleInitialized.pbtxt index 4086fdf15d3ffa..1b19fef0df2c93 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IsBoostedTreesEnsembleInitialized.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IsBoostedTreesEnsembleInitialized.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IsBoostedTreesEnsembleInitialized" input_arg { name: "tree_ensemble_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IsBoostedTreesQuantileStreamResourceInitialized.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IsBoostedTreesQuantileStreamResourceInitialized.pbtxt index 868a34f393b315..359e0e9ba5798c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IsBoostedTreesQuantileStreamResourceInitialized.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/IsBoostedTreesQuantileStreamResourceInitialized.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IsBoostedTreesQuantileStreamResourceInitialized" input_arg { name: "quantile_stream_resource_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IsFinite.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IsFinite.pbtxt index d3051d645ba2b1..8410dce0cb011f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IsFinite.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IsFinite.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IsFinite" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IsInf.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IsInf.pbtxt index 20604d6e930955..1ce6c74691e3d6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IsInf.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IsInf.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IsInf" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IsNan.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IsNan.pbtxt index bc6c66435d00e0..826f2fff6c507d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IsNan.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IsNan.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IsNan" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IsTPUEmbeddingInitialized.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IsTPUEmbeddingInitialized.pbtxt index 6a6a893bfddcd4..bbfe80cc97ac21 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IsTPUEmbeddingInitialized.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IsTPUEmbeddingInitialized.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IsTPUEmbeddingInitialized" output_arg { name: "is_tpu_embedding_initialized" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IsVariableInitialized.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IsVariableInitialized.pbtxt index c61e3263a6aeb9..03496db8d32030 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IsVariableInitialized.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IsVariableInitialized.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IsVariableInitialized" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IsotonicRegression.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IsotonicRegression.pbtxt index 3c9050f67f2607..abe6fb4bbd849a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IsotonicRegression.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IsotonicRegression.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IsotonicRegression" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Iterator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Iterator.pbtxt index a9b8b66ee19046..76b9fdef4ed26e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Iterator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Iterator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Iterator" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IteratorFromStringHandle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IteratorFromStringHandle.pbtxt index e4964182ad4259..ebd34378194d2d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IteratorFromStringHandle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IteratorFromStringHandle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IteratorFromStringHandle" input_arg { name: "string_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IteratorFromStringHandleV2.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/IteratorFromStringHandleV2.pbtxt index 3faf956c0b6030..624c47394db730 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IteratorFromStringHandleV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IteratorFromStringHandleV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IteratorFromStringHandleV2" input_arg { name: "string_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IteratorGetDevice.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IteratorGetDevice.pbtxt index 02020169c71f71..8d379c1557b2ab 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IteratorGetDevice.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IteratorGetDevice.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IteratorGetDevice" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IteratorGetModelProto.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IteratorGetModelProto.pbtxt index ee47f9d7d43634..b1343becfa9bd6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IteratorGetModelProto.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IteratorGetModelProto.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IteratorGetModelProto" input_arg { name: "iterator" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNext.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNext.pbtxt index daddc26f37be32..f204011ed431b7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNext.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNext.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IteratorGetNext" input_arg { name: "iterator" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNextAsOptional.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNextAsOptional.pbtxt index 5cbd8314b9f0e7..c1a532f165c6f0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNextAsOptional.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNextAsOptional.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IteratorGetNextAsOptional" input_arg { name: "iterator" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNextSync.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNextSync.pbtxt index c7f29d2773cfad..e1a7351d2da357 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNextSync.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IteratorGetNextSync.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IteratorGetNextSync" input_arg { name: "iterator" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IteratorToStringHandle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IteratorToStringHandle.pbtxt index 47fa0764034917..87f2dffc941b41 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IteratorToStringHandle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IteratorToStringHandle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IteratorToStringHandle" input_arg { name: "resource_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/IteratorV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/IteratorV2.pbtxt index 244fe9d0a49c62..6f7ab705485da4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/IteratorV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/IteratorV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "IteratorV2" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/KMC2ChainInitialization.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/KMC2ChainInitialization.pbtxt index 
f695595658365b..e9640975b0e685 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/KMC2ChainInitialization.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/KMC2ChainInitialization.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "KMC2ChainInitialization" input_arg { name: "distances" diff --git a/tensorflow/core/ops/compat/ops_history_v2/KmeansPlusPlusInitialization.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/KmeansPlusPlusInitialization.pbtxt index ea5914ba22f17c..27ab4b34885bc9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/KmeansPlusPlusInitialization.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/KmeansPlusPlusInitialization.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "KmeansPlusPlusInitialization" input_arg { name: "points" diff --git a/tensorflow/core/ops/compat/ops_history_v2/KthOrderStatistic.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/KthOrderStatistic.pbtxt index 3b0ca9d2d07479..8e5b79cec0442b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/KthOrderStatistic.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/KthOrderStatistic.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "KthOrderStatistic" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/L2Loss.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/L2Loss.pbtxt index 1e72b270d39aee..90e8619d09f2e7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/L2Loss.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/L2Loss.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "L2Loss" input_arg { name: "t" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LMDBDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LMDBDataset.pbtxt index 6609f2d64fd84f..9ba1bd98191f8e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LMDBDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LMDBDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LMDBDataset" input_arg { name: "filenames" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LMDBReader.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LMDBReader.pbtxt index b6f52544c52965..967c74bb72c778 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LMDBReader.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LMDBReader.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LMDBReader" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LRN.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LRN.pbtxt index aba0c94025dd81..75880682c31830 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LRN.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LRN.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LRN" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LRNGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LRNGrad.pbtxt index 65a6c221aaeff0..37db775eaa236b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LRNGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LRNGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LRNGrad" input_arg { name: "input_grads" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LSTMBlockCell.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LSTMBlockCell.pbtxt index 88643e69b1c1ae..f1071f7fc51699 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LSTMBlockCell.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LSTMBlockCell.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LSTMBlockCell" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LSTMBlockCellGrad.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/LSTMBlockCellGrad.pbtxt index 1851e5bd6e6bfb..b20d47c5c01ef5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LSTMBlockCellGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LSTMBlockCellGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LSTMBlockCellGrad" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LatencyStatsDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LatencyStatsDataset.pbtxt index cdb1716e83521e..546bcdcbd1233e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LatencyStatsDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LatencyStatsDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LatencyStatsDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LeakyRelu.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LeakyRelu.pbtxt index 4cbb4fdf427b38..c0358f96a87f10 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LeakyRelu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LeakyRelu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LeakyRelu" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LeakyReluGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LeakyReluGrad.pbtxt index 524638322a3f62..786872202c456d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LeakyReluGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LeakyReluGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LeakyReluGrad" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LearnedUnigramCandidateSampler.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LearnedUnigramCandidateSampler.pbtxt index cce62fa5d0cc4b..71466c56726990 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LearnedUnigramCandidateSampler.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LearnedUnigramCandidateSampler.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LearnedUnigramCandidateSampler" input_arg { name: "true_classes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LeftShift.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LeftShift.pbtxt index 94088ea60d6d12..c3f56bee3bb47a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LeftShift.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LeftShift.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LeftShift" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LegacyParallelInterleaveDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LegacyParallelInterleaveDatasetV2.pbtxt index 6d4d712d004182..49f6a5574721c8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LegacyParallelInterleaveDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LegacyParallelInterleaveDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LegacyParallelInterleaveDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Less.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Less.pbtxt index 818d92f302140e..e4f12455aa5ae3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Less.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Less.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Less" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LessEqual.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LessEqual.pbtxt index 477d8bf1d92fdd..9162a684069fd8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LessEqual.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LessEqual.pbtxt @@ 
-1,4 +1,4 @@ -op { +op { name: "LessEqual" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Lgamma.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Lgamma.pbtxt index f43959c6afe82a..fcb0241217bdfa 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Lgamma.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Lgamma.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Lgamma" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LinSpace.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LinSpace.pbtxt index ad58235a544266..044ba244206f96 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LinSpace.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LinSpace.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LinSpace" input_arg { name: "start" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ListDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ListDataset.pbtxt index c095f6d1194705..2180d6d82a474c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ListDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ListDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ListDataset" input_arg { name: "tensors" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ListDiff.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ListDiff.pbtxt index 090baff95aefa0..39c3ee8606ccfd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ListDiff.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ListDiff.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ListDiff" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ListSnapshotChunksDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ListSnapshotChunksDataset.pbtxt index be35470141fb28..1b667052f0e069 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ListSnapshotChunksDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ListSnapshotChunksDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ListSnapshotChunksDataset" input_arg { name: "snapshot_path" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadAllTPUEmbeddingParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadAllTPUEmbeddingParameters.pbtxt index 68a146348b2758..5ac0c1cfb2b345 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadAllTPUEmbeddingParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadAllTPUEmbeddingParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadAllTPUEmbeddingParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadAndRemapMatrix.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadAndRemapMatrix.pbtxt index be16dec44ee721..54b4a68f2b344b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadAndRemapMatrix.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadAndRemapMatrix.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadAndRemapMatrix" input_arg { name: "ckpt_path" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadDataset.pbtxt index 41e306f86429a9..c46f54ef3c53a1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadDataset" input_arg { name: "path" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingADAMParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingADAMParameters.pbtxt index 38aec474b28889..5294493f6d1f30 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingADAMParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingADAMParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingADAMParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdadeltaParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdadeltaParameters.pbtxt index c8d55510e0c983..c093049ee0d2f3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdadeltaParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdadeltaParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingAdadeltaParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdagradMomentumParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdagradMomentumParameters.pbtxt index be33451441efe9..82f2cb2161fc7a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdagradMomentumParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdagradMomentumParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingAdagradMomentumParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdagradParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdagradParameters.pbtxt index b4325420e97e5d..485ec861a1eae4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdagradParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingAdagradParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingAdagradParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingCenteredRMSPropParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingCenteredRMSPropParameters.pbtxt index 42b53f9cf2e60c..72c02df3763afb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingCenteredRMSPropParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingCenteredRMSPropParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingCenteredRMSPropParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingFTRLParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingFTRLParameters.pbtxt index 0bceec9c7897de..a429697249129a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingFTRLParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingFTRLParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingFTRLParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt index 2485f0c96ca4f5..ab6af2875d5dc1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingFrequencyEstimatorParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingFrequencyEstimatorParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingMDLAdagradLightParameters.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingMDLAdagradLightParameters.pbtxt index 2140a2b84b2b59..a175817a6e931d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingMDLAdagradLightParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingMDLAdagradLightParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingMDLAdagradLightParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingMomentumParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingMomentumParameters.pbtxt index 3da833e5595b2a..0f135f7c6f0e25 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingMomentumParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingMomentumParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingMomentumParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingProximalAdagradParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingProximalAdagradParameters.pbtxt index c272880303b6ad..10d611a38b2e78 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingProximalAdagradParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingProximalAdagradParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingProximalAdagradParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingProximalYogiParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingProximalYogiParameters.pbtxt index 7feae76245ed14..8f51ed094ae8e1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingProximalYogiParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingProximalYogiParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingProximalYogiParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingRMSPropParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingRMSPropParameters.pbtxt index 711f9e56dfe055..cfec4be6aa7d1c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingRMSPropParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingRMSPropParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingRMSPropParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingStochasticGradientDescentParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingStochasticGradientDescentParameters.pbtxt index a61ee8b1e4bc4e..48b965b4ea3691 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingStochasticGradientDescentParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoadTPUEmbeddingStochasticGradientDescentParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoadTPUEmbeddingStochasticGradientDescentParameters" input_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Log.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Log.pbtxt index 7e5b08a05a1282..a16862c0735f93 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Log.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Log.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Log" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Log1p.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/Log1p.pbtxt index b1f686eebe6982..1f8ba12957e726 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Log1p.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Log1p.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Log1p" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LogMatrixDeterminant.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LogMatrixDeterminant.pbtxt index aee2342ce070aa..3807cdda42595e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LogMatrixDeterminant.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LogMatrixDeterminant.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LogMatrixDeterminant" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LogSoftmax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LogSoftmax.pbtxt index 59748cdb10238b..92d2727bbe7e23 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LogSoftmax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LogSoftmax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LogSoftmax" input_arg { name: "logits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LogUniformCandidateSampler.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LogUniformCandidateSampler.pbtxt index 1066189f03b1b3..9ec45571159093 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LogUniformCandidateSampler.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LogUniformCandidateSampler.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LogUniformCandidateSampler" input_arg { name: "true_classes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LogicalAnd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LogicalAnd.pbtxt index bdad772540320e..b10b115df4f23f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LogicalAnd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LogicalAnd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LogicalAnd" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LogicalNot.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LogicalNot.pbtxt index cba489e4022795..5cf13ad8399c7b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LogicalNot.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LogicalNot.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LogicalNot" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LogicalOr.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LogicalOr.pbtxt index 9306d3d95b357a..635a66d8ba0634 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LogicalOr.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LogicalOr.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LogicalOr" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LookupTableExport.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableExport.pbtxt index f083b589d1cd21..6c56cdeb1de748 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableExport.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableExport.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableExport" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LookupTableExportV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableExportV2.pbtxt index 264b3867d11698..b86fd3a32ad47b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableExportV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableExportV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableExportV2" input_arg { name: "table_handle" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/LookupTableFind.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableFind.pbtxt index e9e33182da56de..5923b502abcb15 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableFind.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableFind.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableFind" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LookupTableFindV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableFindV2.pbtxt index 4fc069f0e48d0d..53cbafbeee082f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableFindV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableFindV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableFindV2" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LookupTableImport.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableImport.pbtxt index c3184809ed3962..73b53a5b6f5784 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableImport.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableImport.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableImport" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LookupTableImportV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableImportV2.pbtxt index aa684ec6b5bcb2..41c03b83c71ba1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableImportV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableImportV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableImportV2" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LookupTableInsert.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableInsert.pbtxt index b33cb4837a7bfc..b96cb478887922 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableInsert.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableInsert.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableInsert" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LookupTableInsertV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableInsertV2.pbtxt index c2e5b1a3cb448c..19d7d49b8600a3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableInsertV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableInsertV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableInsertV2" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LookupTableRemoveV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableRemoveV2.pbtxt index 325b4316da3810..d7fe0bb4dd4e40 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableRemoveV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableRemoveV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableRemoveV2" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LookupTableSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableSize.pbtxt index a878f30b9ea764..0d4bf61189fa33 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableSize" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LookupTableSizeV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LookupTableSizeV2.pbtxt index 
6fb6f47f945262..511beedff01e3b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LookupTableSizeV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LookupTableSizeV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LookupTableSizeV2" input_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LoopCond.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LoopCond.pbtxt index 3be4701ef48460..7111fff007b5f2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LoopCond.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LoopCond.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LoopCond" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/LowerBound.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/LowerBound.pbtxt index 934c879c0907dc..b7d1dee7797707 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/LowerBound.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/LowerBound.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "LowerBound" input_arg { name: "sorted_inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Lu.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Lu.pbtxt index a5140b8af7a87e..59c28e09e704d4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Lu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Lu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Lu" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MakeIterator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MakeIterator.pbtxt index 84e12f49dba0e6..b11c2b9e1dd1b1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MakeIterator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MakeIterator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MakeIterator" input_arg { name: "dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MakeUnique.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MakeUnique.pbtxt index a6dbebb1e588a9..685f52d66eaead 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MakeUnique.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MakeUnique.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MakeUnique" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MapAndBatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MapAndBatchDataset.pbtxt index d4e5d9a29a79f2..8e7b6a32493801 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MapAndBatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MapAndBatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MapAndBatchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MapClear.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MapClear.pbtxt index 261e8ba9b6f5b0..22c5e5fcad020b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MapClear.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MapClear.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MapClear" attr { name: "capacity" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MapDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MapDataset.pbtxt index 6840222b360148..b01b535e48d6fd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MapDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MapDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MapDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MapDefun.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MapDefun.pbtxt index 243d9866d0bbe0..7cb9d19231cfd9 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/MapDefun.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MapDefun.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MapDefun" input_arg { name: "arguments" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MapIncompleteSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MapIncompleteSize.pbtxt index b6c12953967540..ca9c629887fa58 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MapIncompleteSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MapIncompleteSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MapIncompleteSize" output_arg { name: "size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MapPeek.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MapPeek.pbtxt index 13973572d1cf49..4a61cb9e40f47c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MapPeek.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MapPeek.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MapPeek" input_arg { name: "key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MapSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MapSize.pbtxt index 5bffb06eb1ea59..6828f8fbb09a3f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MapSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MapSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MapSize" output_arg { name: "size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MapStage.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MapStage.pbtxt index c34737c065777f..4ad2131a1f1844 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MapStage.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MapStage.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MapStage" input_arg { name: "key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MapUnstage.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MapUnstage.pbtxt index f288ab5f51cdb2..9901130961c8a1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MapUnstage.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MapUnstage.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MapUnstage" input_arg { name: "key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MapUnstageNoKey.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MapUnstageNoKey.pbtxt index ce11b85c5063cb..ee4cca51346065 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MapUnstageNoKey.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MapUnstageNoKey.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MapUnstageNoKey" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatMul.pbtxt index 8f79fa11000f7f..42a2c794353672 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatMul" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatchingFiles.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatchingFiles.pbtxt index e374694de6f10a..3f8af5f3226375 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatchingFiles.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatchingFiles.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatchingFiles" input_arg { name: "pattern" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatchingFilesDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatchingFilesDataset.pbtxt index 916f7e1e60a1c5..bf43730d6dc87f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatchingFilesDataset.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/MatchingFilesDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatchingFilesDataset" input_arg { name: "patterns" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixBandPart.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixBandPart.pbtxt index c6ab4921aa4f70..c25aa9615c51d1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixBandPart.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixBandPart.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixBandPart" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixDeterminant.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixDeterminant.pbtxt index 791c7f7a5afc57..4dd524d4894ed4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixDeterminant.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixDeterminant.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixDeterminant" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiag.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiag.pbtxt index 299678fef206a3..9b0ddb0285d0cd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiag.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiag.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixDiag" input_arg { name: "diagonal" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPart.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPart.pbtxt index 4952fc6b2f2acc..efb1e18fccba39 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPart.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPart.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixDiagPart" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPartV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPartV2.pbtxt index 3325a0d4069336..f709c6d5eb556c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPartV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPartV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixDiagPartV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPartV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPartV3.pbtxt index c7df214d46ed30..75a1307f279142 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPartV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagPartV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixDiagPartV3" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagV2.pbtxt index 4fafe31b76cc93..3f6aa1e6a72fa5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixDiagV2" input_arg { name: "diagonal" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagV3.pbtxt index c8829be1bc41f8..793efcec3b6425 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixDiagV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixDiagV3" input_arg { name: "diagonal" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixExponential.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixExponential.pbtxt index d173fbe0515836..008291a4cafd64 100644 
--- a/tensorflow/core/ops/compat/ops_history_v2/MatrixExponential.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixExponential.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixExponential" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixInverse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixInverse.pbtxt index 6b3befc4dd031f..81d35ad1d082ae 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixInverse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixInverse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixInverse" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixLogarithm.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixLogarithm.pbtxt index c3cc81e38c6cb0..0a87e5905d0123 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixLogarithm.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixLogarithm.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixLogarithm" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiag.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiag.pbtxt index 0dcfca6102cc1b..e8c08f8d295192 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiag.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiag.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixSetDiag" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiagV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiagV2.pbtxt index c11740fbd69a34..1147220c00c774 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiagV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiagV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixSetDiagV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiagV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiagV3.pbtxt index 1490839cf0b3f6..d5a6af8b119291 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiagV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixSetDiagV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixSetDiagV3" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixSolve.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixSolve.pbtxt index 50cf802d9ff263..2a28fa0adb0377 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixSolve.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixSolve.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixSolve" input_arg { name: "matrix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixSolveLs.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixSolveLs.pbtxt index 082064ef822021..5df48fc28e3b29 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixSolveLs.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixSolveLs.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixSolveLs" input_arg { name: "matrix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixSquareRoot.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MatrixSquareRoot.pbtxt index b7e690181cad3e..32ff859e8f9826 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixSquareRoot.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixSquareRoot.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixSquareRoot" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MatrixTriangularSolve.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/MatrixTriangularSolve.pbtxt index e925e1189322b4..915e582d00a0e1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MatrixTriangularSolve.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MatrixTriangularSolve.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MatrixTriangularSolve" input_arg { name: "matrix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Max.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Max.pbtxt index 91262d8e435aee..bf147acf0f405f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Max.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Max.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Max" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxIntraOpParallelismDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxIntraOpParallelismDataset.pbtxt index 946f2f18f84161..85547917acc6e3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxIntraOpParallelismDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxIntraOpParallelismDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxIntraOpParallelismDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPool.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPool.pbtxt index 2fd8174576359a..f4fd1cccf29f7d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPool.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPool.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPool" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPool3D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPool3D.pbtxt index 928c2c0aeaf32b..7af4fca0e93e15 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPool3D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPool3D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPool3D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPool3DGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPool3DGrad.pbtxt index 8e8f10e4fe80e7..77edcb4c89887a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPool3DGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPool3DGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPool3DGrad" input_arg { name: "orig_input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPool3DGradGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPool3DGradGrad.pbtxt index 5012c312b1bf1f..55d26c13c9cab9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPool3DGradGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPool3DGradGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPool3DGradGrad" input_arg { name: "orig_input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGrad.pbtxt index 2a36894500bdd8..131a3633cf98f7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPoolGrad" input_arg { name: "orig_input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGrad.pbtxt index 9589f707f4c481..9b1f4de08ea069 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPoolGradGrad" input_arg { name: "orig_input" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGradV2.pbtxt index c716043ae9ac56..fba1ab57dc6b28 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPoolGradGradV2" input_arg { name: "orig_input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGradWithArgmax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGradWithArgmax.pbtxt index dc96f707c66e8a..3c3cdbb90d1287 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGradWithArgmax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradGradWithArgmax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPoolGradGradWithArgmax" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradV2.pbtxt index 42d63ab79b1e60..7e38cf840dd0e0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPoolGradV2" input_arg { name: "orig_input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradWithArgmax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradWithArgmax.pbtxt index 1d30e8703369a6..7c3ab4a0cd05c6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradWithArgmax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolGradWithArgmax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPoolGradWithArgmax" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolV2.pbtxt index 82dc586d6d487d..3ef7da8d9d9848 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPoolV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolWithArgmax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolWithArgmax.pbtxt index 0a4a4df9c8135d..d33bbd2f70769e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MaxPoolWithArgmax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MaxPoolWithArgmax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MaxPoolWithArgmax" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Maximum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Maximum.pbtxt index 32bbcb645478ef..c6ee10114c63f0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Maximum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Maximum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Maximum" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Mean.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Mean.pbtxt index 10769852ec7d10..e0b5f145616867 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Mean.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Mean.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Mean" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Merge.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Merge.pbtxt index 23610f501a46a0..d08f9cc55e94f0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Merge.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Merge.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Merge" input_arg { name: 
"inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MergeDedupData.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MergeDedupData.pbtxt index 147341c8f374af..a5bcb48c34a0f1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MergeDedupData.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MergeDedupData.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MergeDedupData" input_arg { name: "integer_tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MergeSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MergeSummary.pbtxt index ffe893e53bfa8a..d9b14d4511093e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MergeSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MergeSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MergeSummary" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MergeV2Checkpoints.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MergeV2Checkpoints.pbtxt index 96b092e55660ac..2a6b60b4e531c9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MergeV2Checkpoints.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MergeV2Checkpoints.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MergeV2Checkpoints" input_arg { name: "checkpoint_prefixes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Mfcc.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Mfcc.pbtxt index 8f4577250c9f3d..4c22eb8c69fe03 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Mfcc.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Mfcc.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Mfcc" input_arg { name: "spectrogram" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Min.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Min.pbtxt index bb0cd548c8a13a..4959b5e8d583b9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Min.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Min.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Min" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Minimum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Minimum.pbtxt index 756451b0b748d9..01cc483ba54dcb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Minimum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Minimum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Minimum" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MirrorPad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MirrorPad.pbtxt index 0642f97fafe52a..bf64a6ca5040b1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MirrorPad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MirrorPad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MirrorPad" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MirrorPadGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MirrorPadGrad.pbtxt index d1503a556c6127..b544cfbe72e7c4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MirrorPadGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MirrorPadGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MirrorPadGrad" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MlirPassthroughOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MlirPassthroughOp.pbtxt index 4f7ddeb99acaf0..5990eb2850281d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MlirPassthroughOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MlirPassthroughOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MlirPassthroughOp" input_arg { name: "inputs" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/Mod.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Mod.pbtxt index 37a302eaa25de1..6c39ed683f6bee 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Mod.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Mod.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Mod" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ModelDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ModelDataset.pbtxt index d3228c40e1d4dc..14ac940f6a7764 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ModelDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ModelDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ModelDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Mul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Mul.pbtxt index d13b3adae90bd6..ef592669ff62cd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Mul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Mul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Mul" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MulNoNan.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MulNoNan.pbtxt index ba06844a75700e..ca5c92fb15bfde 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MulNoNan.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MulNoNan.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MulNoNan" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIterator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIterator.pbtxt index d7067ebf6eab43..d85c553f186f3b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIterator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIterator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MultiDeviceIterator" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorFromStringHandle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorFromStringHandle.pbtxt index 74acb5eb9ce0c9..384b1477dbc599 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorFromStringHandle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorFromStringHandle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MultiDeviceIteratorFromStringHandle" input_arg { name: "string_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorGetNextFromShard.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorGetNextFromShard.pbtxt index 8cf4fb6c28514c..2e007c25b2337d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorGetNextFromShard.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorGetNextFromShard.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MultiDeviceIteratorGetNextFromShard" input_arg { name: "multi_device_iterator" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorInit.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorInit.pbtxt index a2829e514c5beb..a011997186af14 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorInit.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorInit.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MultiDeviceIteratorInit" input_arg { name: "dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorToStringHandle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorToStringHandle.pbtxt index 
6f332755692ba1..d7780d79687166 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorToStringHandle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MultiDeviceIteratorToStringHandle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MultiDeviceIteratorToStringHandle" input_arg { name: "multi_device_iterator" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Multinomial.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Multinomial.pbtxt index ee55af8dddac95..c258fa6e7ed4c8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Multinomial.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Multinomial.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Multinomial" input_arg { name: "logits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MutableDenseHashTable.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MutableDenseHashTable.pbtxt index 4ebb385704164c..eecaeb2d4fb570 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MutableDenseHashTable.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MutableDenseHashTable.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MutableDenseHashTable" input_arg { name: "empty_key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MutableDenseHashTableV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MutableDenseHashTableV2.pbtxt index 861d25c0c1b3f4..739079ced16cb6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MutableDenseHashTableV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MutableDenseHashTableV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MutableDenseHashTableV2" input_arg { name: "empty_key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MutableHashTable.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MutableHashTable.pbtxt index c1f2e216da5b83..a8ecc34cb18bd0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MutableHashTable.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MutableHashTable.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MutableHashTable" output_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableOfTensors.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableOfTensors.pbtxt index 7b71665fb96804..bdec2ff5939b39 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableOfTensors.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableOfTensors.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MutableHashTableOfTensors" output_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableOfTensorsV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableOfTensorsV2.pbtxt index d32880aba77e28..dc46d075df3d94 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableOfTensorsV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableOfTensorsV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MutableHashTableOfTensorsV2" output_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableV2.pbtxt index eb690ee06e1aa9..610214dfa76e66 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MutableHashTableV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MutableHashTableV2" output_arg { name: "table_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MutexLock.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MutexLock.pbtxt index 16f1ad6e0a1049..6b5747cd4e383e 
100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MutexLock.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MutexLock.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MutexLock" input_arg { name: "mutex" diff --git a/tensorflow/core/ops/compat/ops_history_v2/MutexV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/MutexV2.pbtxt index ef04c075cd8d6e..b20f9b1e7996b3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/MutexV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/MutexV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "MutexV2" output_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NcclAllReduce.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NcclAllReduce.pbtxt index 59ab7c672eb6cc..80f91edef1d237 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NcclAllReduce.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NcclAllReduce.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NcclAllReduce" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NcclBroadcast.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NcclBroadcast.pbtxt index 46a2cab258fbf0..02a5487d1acf6f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NcclBroadcast.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NcclBroadcast.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NcclBroadcast" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NcclReduce.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NcclReduce.pbtxt index 141189947600f3..507f92cff2c87e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NcclReduce.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NcclReduce.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NcclReduce" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Ndtri.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Ndtri.pbtxt index c1549d77e8e841..a7a923f780db40 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Ndtri.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Ndtri.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Ndtri" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NearestNeighbors.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NearestNeighbors.pbtxt index 42b7b7c906bd85..5d1e5ed57659ac 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NearestNeighbors.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NearestNeighbors.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NearestNeighbors" input_arg { name: "points" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Neg.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Neg.pbtxt index 22a09654a9b7c3..864d0257fe4b2d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Neg.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Neg.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Neg" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NegTrain.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NegTrain.pbtxt index d97f107378d3b9..f12529fd6328b8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NegTrain.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NegTrain.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NegTrain" input_arg { name: "w_in" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NextAfter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NextAfter.pbtxt index 1cbacb54fe2bec..70e4afe6c77db5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NextAfter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NextAfter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: 
"NextAfter" input_arg { name: "x1" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NextIteration.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NextIteration.pbtxt index 6044c37879b4d2..7186fc0b684029 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NextIteration.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NextIteration.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NextIteration" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NoOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NoOp.pbtxt index 6c06e127e1bd6b..8f0370633fc1ce 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NoOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NoOp.pbtxt @@ -1,3 +1,3 @@ -op { +op { name: "NoOp" } diff --git a/tensorflow/core/ops/compat/ops_history_v2/NonDeterministicInts.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NonDeterministicInts.pbtxt index 470bdf155b510a..3fa5aa4a605c7d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NonDeterministicInts.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NonDeterministicInts.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NonDeterministicInts" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppression.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppression.pbtxt index a63ddf63dbac33..ded8b3728f4613 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppression.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppression.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NonMaxSuppression" input_arg { name: "boxes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV2.pbtxt index 0ff1ffcb5ae632..90c23bc0457dcc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NonMaxSuppressionV2" input_arg { name: "boxes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV3.pbtxt index bf9aac7c28f2c5..daeffd841b8e00 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NonMaxSuppressionV3" input_arg { name: "boxes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV4.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV4.pbtxt index ee1f325a9bd8d6..07ca92fef71df1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV4.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV4.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NonMaxSuppressionV4" input_arg { name: "boxes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV5.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV5.pbtxt index fbfe05a13141a1..cabec767a15617 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV5.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionV5.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NonMaxSuppressionV5" input_arg { name: "boxes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionWithOverlaps.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionWithOverlaps.pbtxt index a150d688af1b69..d89eeee4a4a34b 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionWithOverlaps.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NonMaxSuppressionWithOverlaps.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NonMaxSuppressionWithOverlaps" input_arg { name: "overlaps" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NonSerializableDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NonSerializableDataset.pbtxt index b47c4157f9dbde..5fbd4bdb656c57 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NonSerializableDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NonSerializableDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NonSerializableDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NotEqual.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NotEqual.pbtxt index 738fc0de0114d8..099ef75d622d83 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NotEqual.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NotEqual.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NotEqual" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/NthElement.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/NthElement.pbtxt index a1c7a12ba00126..c9e797273df119 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/NthElement.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/NthElement.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "NthElement" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OneHot.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OneHot.pbtxt index 83163e8685a19e..0c1cfb6d686a95 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OneHot.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OneHot.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OneHot" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OneShotIterator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OneShotIterator.pbtxt index 362e5b271b172e..a2969bcc0e36c9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OneShotIterator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OneShotIterator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OneShotIterator" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OnesLike.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OnesLike.pbtxt index 6827c87d832660..d4609438139b35 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OnesLike.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OnesLike.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OnesLike" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OptimizeDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OptimizeDataset.pbtxt index 4a2b3f7420fd6c..5b40b213eb5dc8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OptimizeDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OptimizeDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OptimizeDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OptimizeDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OptimizeDatasetV2.pbtxt index d647c7bc1f0d58..7e1537ebe57a3a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OptimizeDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OptimizeDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OptimizeDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OptionalFromValue.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/OptionalFromValue.pbtxt index 3a9510a674c426..ddfc9c43d348c3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OptionalFromValue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OptionalFromValue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OptionalFromValue" input_arg { name: "components" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OptionalGetValue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OptionalGetValue.pbtxt index e5f3ab817ab570..e7364a1014afe8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OptionalGetValue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OptionalGetValue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OptionalGetValue" input_arg { name: "optional" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OptionalHasValue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OptionalHasValue.pbtxt index e744d908129750..da76333cecbf70 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OptionalHasValue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OptionalHasValue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OptionalHasValue" input_arg { name: "optional" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OptionalNone.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OptionalNone.pbtxt index a051d978224361..c47d6a745481e9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OptionalNone.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OptionalNone.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OptionalNone" output_arg { name: "optional" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OptionsDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OptionsDataset.pbtxt index 6e3c0a9fdb22a7..fc63e5ee2951cf 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OptionsDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OptionsDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OptionsDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapClear.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapClear.pbtxt index 2f77ea5e73bbb4..726e26e6172f05 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapClear.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapClear.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OrderedMapClear" attr { name: "capacity" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapIncompleteSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapIncompleteSize.pbtxt index a6439051fe23a0..9a9572a51beea8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapIncompleteSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapIncompleteSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OrderedMapIncompleteSize" output_arg { name: "size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapPeek.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapPeek.pbtxt index 8e61ac23f9bb73..0d9fd20fe077dd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapPeek.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapPeek.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OrderedMapPeek" input_arg { name: "key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapSize.pbtxt index 46777f1d7e7e8e..ea07d7e4215fb8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapSize.pbtxt @@ -1,4 
+1,4 @@ -op { +op { name: "OrderedMapSize" output_arg { name: "size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapStage.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapStage.pbtxt index badbf44b424839..76af456ed8372b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapStage.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapStage.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OrderedMapStage" input_arg { name: "key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapUnstage.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapUnstage.pbtxt index f3d4dced87d04e..c09b4be94f4908 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapUnstage.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapUnstage.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OrderedMapUnstage" input_arg { name: "key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapUnstageNoKey.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapUnstageNoKey.pbtxt index 295f8258d599c4..bc3e8c7da30200 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OrderedMapUnstageNoKey.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OrderedMapUnstageNoKey.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OrderedMapUnstageNoKey" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeue.pbtxt index b802b78283c0ee..29dc8b5c5879bd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OutfeedDequeue" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueTuple.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueTuple.pbtxt index 580babfb502d44..3e0d31078b3ec6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueTuple.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueTuple.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OutfeedDequeueTuple" output_arg { name: "outputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueTupleV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueTupleV2.pbtxt index e8d13591681564..744744b4545cad 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueTupleV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueTupleV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OutfeedDequeueTupleV2" input_arg { name: "device_ordinal" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueV2.pbtxt index 5863aae467fed3..c5ca1f31a676cb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OutfeedDequeueV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OutfeedDequeueV2" input_arg { name: "device_ordinal" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OutfeedEnqueue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/OutfeedEnqueue.pbtxt index 4836db6b12b31f..d8c16f4d62978c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OutfeedEnqueue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OutfeedEnqueue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OutfeedEnqueue" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/OutfeedEnqueueTuple.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/OutfeedEnqueueTuple.pbtxt index e7464383175911..0bf1a5ba4804eb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/OutfeedEnqueueTuple.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/OutfeedEnqueueTuple.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "OutfeedEnqueueTuple" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Pack.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Pack.pbtxt index 12eecb122fbe6a..65eb67509d3fd8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Pack.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Pack.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Pack" input_arg { name: "values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Pad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Pad.pbtxt index 5d1b80e2f3976c..1c7b9c7b457b3c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Pad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Pad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Pad" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PadV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PadV2.pbtxt index aa1a0fb109be72..463cb71f207ed0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PadV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PadV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PadV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PaddedBatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PaddedBatchDataset.pbtxt index f67cb9f4e4e5af..a118fc102f10a2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PaddedBatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PaddedBatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PaddedBatchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PaddedBatchDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PaddedBatchDatasetV2.pbtxt index 1142cbf9eb5bed..4ae5a66624a65d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PaddedBatchDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PaddedBatchDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PaddedBatchDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PaddingFIFOQueue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PaddingFIFOQueue.pbtxt index e03ca8fecfec5f..f5eca52ba1927e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PaddingFIFOQueue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PaddingFIFOQueue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PaddingFIFOQueue" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PaddingFIFOQueueV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PaddingFIFOQueueV2.pbtxt index 12404b10303679..c398f9ee3a8a14 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PaddingFIFOQueueV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PaddingFIFOQueueV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PaddingFIFOQueueV2" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParallelBatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParallelBatchDataset.pbtxt index 023d855a4fa2fd..5c160cae2ddad7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParallelBatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParallelBatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParallelBatchDataset" input_arg { name: "input_dataset" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/ParallelConcat.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParallelConcat.pbtxt index 154f48829d78d4..b0d1cc3918574a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParallelConcat.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParallelConcat.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParallelConcat" input_arg { name: "values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParallelDynamicStitch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParallelDynamicStitch.pbtxt index 8566fc274f031d..9ab18a1ba5e69e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParallelDynamicStitch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParallelDynamicStitch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParallelDynamicStitch" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParallelFilterDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParallelFilterDataset.pbtxt index e33694113b5105..1c895e2767636e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParallelFilterDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParallelFilterDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParallelFilterDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDataset.pbtxt index 730f9b76b490be..f278cb0efc6dfb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParallelInterleaveDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV2.pbtxt index 507463da15bb97..110573b42ed39f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParallelInterleaveDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV3.pbtxt index 3f73fc38abc0d0..096460fb1efcdb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParallelInterleaveDatasetV3" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV4.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV4.pbtxt index 65d63780827683..94f9ae0f6eebd1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV4.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParallelInterleaveDatasetV4.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParallelInterleaveDatasetV4" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParallelMapDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParallelMapDataset.pbtxt index 20ac8a4dcc3d51..991e4192983c3d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParallelMapDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParallelMapDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParallelMapDataset" 
input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParallelMapDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParallelMapDatasetV2.pbtxt index 87012c3c49f3d6..55e73b740adefd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParallelMapDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParallelMapDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParallelMapDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParameterizedTruncatedNormal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParameterizedTruncatedNormal.pbtxt index 8bca662ec52d00..1f96da6f7886e6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParameterizedTruncatedNormal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParameterizedTruncatedNormal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParameterizedTruncatedNormal" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParseExample.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParseExample.pbtxt index b1020d356343db..a1e35bde86b0a1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParseExample.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParseExample.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParseExample" input_arg { name: "serialized" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParseExampleDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParseExampleDataset.pbtxt index 4e12e94a6a28c8..4dc9ac1efb6cd3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParseExampleDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParseExampleDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParseExampleDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParseExampleDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParseExampleDatasetV2.pbtxt index 8af77a0bd59095..59632a160b121c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParseExampleDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParseExampleDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParseExampleDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParseExampleV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParseExampleV2.pbtxt index cc2706b046e368..0d7e1d37e23ac9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParseExampleV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParseExampleV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParseExampleV2" input_arg { name: "serialized" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParseSequenceExample.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParseSequenceExample.pbtxt index 8060fe90a3553e..03ac5be8d26160 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParseSequenceExample.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParseSequenceExample.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParseSequenceExample" input_arg { name: "serialized" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParseSequenceExampleV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParseSequenceExampleV2.pbtxt index 07dfc9d7467e14..7ba59734893b0b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParseSequenceExampleV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParseSequenceExampleV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParseSequenceExampleV2" input_arg { name: "serialized" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/ParseSingleExample.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParseSingleExample.pbtxt index 0ea9857ac83b6f..aaa69af4f62ac6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParseSingleExample.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParseSingleExample.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParseSingleExample" input_arg { name: "serialized" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParseSingleSequenceExample.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParseSingleSequenceExample.pbtxt index c0f76a20b44160..a0f52dbdd1f406 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParseSingleSequenceExample.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParseSingleSequenceExample.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParseSingleSequenceExample" input_arg { name: "serialized" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ParseTensor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ParseTensor.pbtxt index 20232f99165c8d..63d1f1292eed08 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ParseTensor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ParseTensor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ParseTensor" input_arg { name: "serialized" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PartitionedCall.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PartitionedCall.pbtxt index 59752220042a63..b51bd1de9fce0d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PartitionedCall.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PartitionedCall.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PartitionedCall" input_arg { name: "args" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Placeholder.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Placeholder.pbtxt index ec0fdcf19f7ebd..7c0f57a94e177d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Placeholder.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Placeholder.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Placeholder" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PlaceholderV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PlaceholderV2.pbtxt index e9d5b4840cb8e0..b2cd20b238fa5e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PlaceholderV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PlaceholderV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PlaceholderV2" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PlaceholderWithDefault.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PlaceholderWithDefault.pbtxt index 3d02d762312073..79a2ffb4492a6a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PlaceholderWithDefault.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PlaceholderWithDefault.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PlaceholderWithDefault" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Polygamma.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Polygamma.pbtxt index 3f20bc10cd2332..6bf0d9ba4cf9cc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Polygamma.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Polygamma.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Polygamma" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PopulationCount.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PopulationCount.pbtxt index 97e98373418383..d66c1ac00ae122 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PopulationCount.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/PopulationCount.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PopulationCount" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Pow.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Pow.pbtxt index ad30b536cd7742..b1cc1b8f479801 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Pow.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Pow.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Pow" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PrefetchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PrefetchDataset.pbtxt index 92926f6523d1ff..81953a0e75c0c9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PrefetchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PrefetchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PrefetchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Prelinearize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Prelinearize.pbtxt index f74c9b381dce7d..b5ed810c25a426 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Prelinearize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Prelinearize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Prelinearize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PrelinearizeTuple.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PrelinearizeTuple.pbtxt index 92de7f372a74d5..bb1ae7d3e2f828 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PrelinearizeTuple.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PrelinearizeTuple.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PrelinearizeTuple" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PreventGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PreventGradient.pbtxt index 19a95b09d5e645..1649fc808aa935 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PreventGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PreventGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PreventGradient" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Print.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Print.pbtxt index 1966093e81c6ad..fbbb514b177737 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Print.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Print.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Print" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PrintV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PrintV2.pbtxt index 8f748465b810bd..c5942f0a6145a5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PrintV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PrintV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PrintV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PriorityQueue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PriorityQueue.pbtxt index af0f36d238d9e6..b44d83dfb20036 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PriorityQueue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PriorityQueue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PriorityQueue" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PriorityQueueV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PriorityQueueV2.pbtxt index ab426238988baf..a4e7c750b65a14 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PriorityQueueV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PriorityQueueV2.pbtxt @@ 
-1,4 +1,4 @@ -op { +op { name: "PriorityQueueV2" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PrivateThreadPoolDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PrivateThreadPoolDataset.pbtxt index 483f4b88fd93f7..c16c1eb164728d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PrivateThreadPoolDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PrivateThreadPoolDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PrivateThreadPoolDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Prod.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Prod.pbtxt index d3c71eacd78cf9..fe9126bca16ed1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Prod.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Prod.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Prod" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PyFunc.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PyFunc.pbtxt index de5661de7e2150..987f028051ea0d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PyFunc.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PyFunc.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PyFunc" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/PyFuncStateless.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/PyFuncStateless.pbtxt index fa2ac3e446b1a7..2a587d53d937c8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/PyFuncStateless.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/PyFuncStateless.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "PyFuncStateless" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Qr.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Qr.pbtxt index 251f8ff7f7f0d6..8319528f8a016e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Qr.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Qr.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Qr" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantize.pbtxt index 30d0b750a8df0a..fb662f7057c2f0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizeAndDequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV2.pbtxt index d1fa4fba7fd4e5..d946e753cf7bbf 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizeAndDequantizeV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV3.pbtxt index 8d6a0317190cfe..cb7762a9deda20 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizeAndDequantizeV3" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV4.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV4.pbtxt index fca137de6cd4bc..2a49131faaf2d8 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV4.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV4.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizeAndDequantizeV4" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV4Grad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV4Grad.pbtxt index 71df7854231502..0bbe87452b145d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV4Grad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizeAndDequantizeV4Grad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizeAndDequantizeV4Grad" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizeDownAndShrinkRange.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizeDownAndShrinkRange.pbtxt index ea6737ee390894..42783d3a14ebb4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizeDownAndShrinkRange.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizeDownAndShrinkRange.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizeDownAndShrinkRange" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizeV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizeV2.pbtxt index d164717d5aaf39..37cd1384176d6e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizeV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizeV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizeV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedAdd.pbtxt index be3f511f03aec3..4532bc23d50a2b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedAdd" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedAvgPool.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedAvgPool.pbtxt index 44e51eb39509f7..0ae3390d303c11 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedAvgPool.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedAvgPool.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedAvgPool" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedBatchNormWithGlobalNormalization.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedBatchNormWithGlobalNormalization.pbtxt index be4b9356265362..832b8ba577577d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedBatchNormWithGlobalNormalization.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedBatchNormWithGlobalNormalization.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedBatchNormWithGlobalNormalization" input_arg { name: "t" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedBiasAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedBiasAdd.pbtxt index 3c94fb54f0a497..b479c2c54e4d26 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedBiasAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedBiasAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedBiasAdd" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConcat.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConcat.pbtxt index 2752ebfca09e38..449f588ac8f498 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConcat.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConcat.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConcat" input_arg { name: "concat_dim" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2D.pbtxt index 3680e575ace2f3..b1cf1c8d334182 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndRelu.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndRelu.pbtxt index c9f1696d1ecf67..229e4c436dd622 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndRelu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndRelu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DAndRelu" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndReluAndRequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndReluAndRequantize.pbtxt index 1cbc11d524e167..bc566896f53dfe 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndReluAndRequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndReluAndRequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DAndReluAndRequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndRequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndRequantize.pbtxt index d3166c425a984f..5d26709f14e31c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndRequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DAndRequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DAndRequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DPerChannel.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DPerChannel.pbtxt index 06c3d5f115fab3..93640944477061 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DPerChannel.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DPerChannel.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DPerChannel" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBias.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBias.pbtxt index f059cf786d14f0..8372a882260457 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBias.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBias.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DWithBias" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndRelu.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndRelu.pbtxt index 4eef8cb4c4f3c2..af0ce39a844546 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndRelu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndRelu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DWithBiasAndRelu" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndReluAndRequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndReluAndRequantize.pbtxt index 
174a196a036974..599f19e666dcdc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndReluAndRequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndReluAndRequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DWithBiasAndReluAndRequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndRequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndRequantize.pbtxt index 96ed2852b68046..8cf8fbb2eae658 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndRequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasAndRequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DWithBiasAndRequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.pbtxt index 37f884f1cb0e88..e46786a9a74a86 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSumAndRelu.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSumAndRelu.pbtxt index ed100f68a9b58a..d74439b670ee37 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSumAndRelu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSumAndRelu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DWithBiasSumAndRelu" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSumAndReluAndRequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSumAndReluAndRequantize.pbtxt index d4a3c49b46c5c7..70c2366a19f2bd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSumAndReluAndRequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedConv2DWithBiasSumAndReluAndRequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedConv2DWithBiasSumAndReluAndRequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2D.pbtxt index 6b4163c799cdfa..f88bba239bf07b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedDepthwiseConv2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBias.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBias.pbtxt index c9e584543f3767..4faf839b3b6651 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBias.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBias.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedDepthwiseConv2DWithBias" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBiasAndRelu.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBiasAndRelu.pbtxt index a4bb2fcd372ded..6a4b514e23ffec 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBiasAndRelu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBiasAndRelu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedDepthwiseConv2DWithBiasAndRelu" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.pbtxt index c24ba3165a8aca..3de6c3f9e28f47 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedInstanceNorm.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedInstanceNorm.pbtxt index 720358f1bd01d7..98136d82ebda45 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedInstanceNorm.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedInstanceNorm.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedInstanceNorm" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMul.pbtxt index d1332abc5169ca..7e4707a316f80d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedMatMul" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBias.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBias.pbtxt index 55c80678a6eb90..a59adb7f78c6d7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBias.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBias.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedMatMulWithBias" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndDequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndDequantize.pbtxt index 6c6370a1ac0191..04ecfdbd855f83 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndDequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndDequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedMatMulWithBiasAndDequantize" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndRelu.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndRelu.pbtxt index ecdeb1d05d1c90..cd0acb9d721657 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndRelu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndRelu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedMatMulWithBiasAndRelu" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndReluAndRequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndReluAndRequantize.pbtxt index 140ed993453766..b591d3fb37c868 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndReluAndRequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndReluAndRequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedMatMulWithBiasAndReluAndRequantize" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndRequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndRequantize.pbtxt index 16b180475e46e2..1aab9762eea036 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndRequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMatMulWithBiasAndRequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedMatMulWithBiasAndRequantize" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMaxPool.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMaxPool.pbtxt index 9164c0fcfc6160..47d6ac80518ef9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMaxPool.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMaxPool.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedMaxPool" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMul.pbtxt index 97c025672c48b2..795ab1341d9c67 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedMul" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedRelu.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedRelu.pbtxt index 77fb5c2eeb9041..724d8b3946bdb1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedRelu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedRelu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedRelu" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedRelu6.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedRelu6.pbtxt index b76f1159ec4f16..0f389d5eae9337 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedRelu6.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedRelu6.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedRelu6" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedReluX.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedReluX.pbtxt index 772ac59eba26ff..9ee6f0d2e274b9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedReluX.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedReluX.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedReluX" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedReshape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedReshape.pbtxt index fa7b1fee5cf5f1..f54db98943cc14 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedReshape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QuantizedReshape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedReshape" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QuantizedResizeBilinear.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QuantizedResizeBilinear.pbtxt index aca635c5016a1a..bee577ed23b411 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QuantizedResizeBilinear.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/QuantizedResizeBilinear.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QuantizedResizeBilinear" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueClose.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueClose.pbtxt index 0a4a305cc97568..582eeccd6a263a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueClose.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueClose.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueClose" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueCloseV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueCloseV2.pbtxt index ab9c620c7fb7b4..e0544c13e654b4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueCloseV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueCloseV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueCloseV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeue.pbtxt index f4a3b6fe195c42..f06745f20fe456 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueDequeue" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueMany.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueMany.pbtxt index 986897a5bcd27f..374ecfb18a87c3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueMany.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueMany.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueDequeueMany" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueManyV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueManyV2.pbtxt index 355b7905840281..f3ebc6c7e59288 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueManyV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueManyV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueDequeueManyV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueUpTo.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueUpTo.pbtxt index 6e006006a784f5..6fa30ac810a0e5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueUpTo.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueUpTo.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueDequeueUpTo" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueUpToV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueUpToV2.pbtxt index 5901d313926bdf..2016cc7f04a0ad 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueUpToV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueUpToV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueDequeueUpToV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueV2.pbtxt index 44a893ea7c3a00..e338ccbd355a76 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueDequeueV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueDequeueV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueue.pbtxt index 
689046ebae4e2d..fb94d288f2400e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueEnqueue" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueMany.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueMany.pbtxt index f3cbf429f28639..2d9582430725b9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueMany.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueMany.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueEnqueueMany" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueManyV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueManyV2.pbtxt index 159b3012075455..c327d27e2f11f6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueManyV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueManyV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueEnqueueManyV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueV2.pbtxt index e6bf061965d84b..da8cdd3cd67c48 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueEnqueueV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueEnqueueV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueIsClosed.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueIsClosed.pbtxt index 42426552b51808..11a421b27c85e3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueIsClosed.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueIsClosed.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueIsClosed" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueIsClosedV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueIsClosedV2.pbtxt index efe540401c9591..7cf1fde1bcee16 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueIsClosedV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueIsClosedV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueIsClosedV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueSize.pbtxt index 4dfe7aff427356..d2a49624f20ded 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueSize" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/QueueSizeV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/QueueSizeV2.pbtxt index c7e11d7cf326e4..46eb229a3fa7e6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/QueueSizeV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/QueueSizeV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "QueueSizeV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RFFT.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RFFT.pbtxt index 0879b0cfe5f2ae..02456ea217b791 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RFFT.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RFFT.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RFFT" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RFFT2D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RFFT2D.pbtxt index 
79ed06b41d4dea..f3676f45cde226 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RFFT2D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RFFT2D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RFFT2D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RFFT3D.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RFFT3D.pbtxt index 754d372cf560ce..6475cd47316c19 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RFFT3D.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RFFT3D.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RFFT3D" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RFFTND.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RFFTND.pbtxt index 7f24e19efc06bf..8f75bea6baef82 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RFFTND.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RFFTND.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RFFTND" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RGBToHSV.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RGBToHSV.pbtxt index 22b6995682fb33..9ed50d337d0346 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RGBToHSV.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RGBToHSV.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RGBToHSV" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedBincount.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedBincount.pbtxt index 3fda312a6a4653..4f5fb24109cad4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedBincount.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedBincount.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedBincount" input_arg { name: "splits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedCountSparseOutput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedCountSparseOutput.pbtxt index 644b0eca3ad379..aa1a4e07aafaa2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedCountSparseOutput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedCountSparseOutput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedCountSparseOutput" input_arg { name: "splits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedCross.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedCross.pbtxt index 2407315da2ee8a..0e9fe0adcfba2c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedCross.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedCross.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedCross" input_arg { name: "ragged_values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedFillEmptyRows.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedFillEmptyRows.pbtxt index 54ba7be3cb1fdc..4587abc0f66e9a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedFillEmptyRows.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedFillEmptyRows.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedFillEmptyRows" input_arg { name: "value_rowids" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedFillEmptyRowsGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedFillEmptyRowsGrad.pbtxt index 04f8b3e6f3a821..ea9546d504068f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedFillEmptyRowsGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedFillEmptyRowsGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedFillEmptyRowsGrad" input_arg { name: "reverse_index_map" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedGather.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/RaggedGather.pbtxt index 5e0e69e365a36c..afa14e8e8a8b20 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedGather.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedGather.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedGather" input_arg { name: "params_nested_splits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedRange.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedRange.pbtxt index e895110b6372a1..866c9b472d906d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedRange.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedRange.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedRange" input_arg { name: "starts" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorFromVariant.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorFromVariant.pbtxt index 50d8a8dd1b86db..5c750d0ce7e567 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorFromVariant.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorFromVariant.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedTensorFromVariant" input_arg { name: "encoded_ragged" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToSparse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToSparse.pbtxt index bcbb0d25bc94ff..f9172b4cf3772a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToSparse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToSparse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedTensorToSparse" input_arg { name: "rt_nested_splits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToTensor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToTensor.pbtxt index 8ecd7fb2c86ec3..60fceb565ebc03 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToTensor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToTensor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedTensorToTensor" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToVariant.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToVariant.pbtxt index 93d6b27fd053a1..f875a2a2ca7fae 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToVariant.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToVariant.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedTensorToVariant" input_arg { name: "rt_nested_splits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToVariantGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToVariantGradient.pbtxt index fc6ede9bd741bf..45f2fcefe04210 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToVariantGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RaggedTensorToVariantGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RaggedTensorToVariantGradient" input_arg { name: "encoded_ragged_grad" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomCrop.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomCrop.pbtxt index 9ca7d2f4b5b3bd..a5353cf58d5d41 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomCrop.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomCrop.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomCrop" input_arg { name: "image" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomDataset.pbtxt index 99fb23493250ca..d646d19b2e5ef2 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/RandomDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomDataset" input_arg { name: "seed" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomDatasetV2.pbtxt index d561045e0b738d..3cf01f7644e9f1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomDatasetV2" input_arg { name: "seed" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomGamma.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomGamma.pbtxt index 7c655027ce1832..2f38a20f8f0a2e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomGamma.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomGamma.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomGamma" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomGammaGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomGammaGrad.pbtxt index f0c1b5033a5ddd..1e1c0723f6cbfe 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomGammaGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomGammaGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomGammaGrad" input_arg { name: "alpha" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomIndexShuffle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomIndexShuffle.pbtxt index 06954c6cc639a3..22e6b88af0a921 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomIndexShuffle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomIndexShuffle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomIndexShuffle" input_arg { name: "index" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomPoisson.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomPoisson.pbtxt index 33d8920e36dcaa..5499e8d678c590 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomPoisson.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomPoisson.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomPoisson" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomPoissonV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomPoissonV2.pbtxt index 1330833972ef23..6c3d9827a35cf8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomPoissonV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomPoissonV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomPoissonV2" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomShuffle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomShuffle.pbtxt index 189e7d770a0652..ddd1a8d3f2b2c1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomShuffle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomShuffle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomShuffle" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomShuffleQueue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomShuffleQueue.pbtxt index 5793974674e561..550acae8d5be11 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomShuffleQueue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomShuffleQueue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomShuffleQueue" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomShuffleQueueV2.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/RandomShuffleQueueV2.pbtxt index b73d70541fafab..7d9807c4e9564b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomShuffleQueueV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomShuffleQueueV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomShuffleQueueV2" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomStandardNormal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomStandardNormal.pbtxt index c693a9f7ba2e17..71fe5e5ef326b8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomStandardNormal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomStandardNormal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomStandardNormal" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomUniform.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomUniform.pbtxt index 63c637f0de1309..449a9ef973929c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomUniform.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomUniform.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomUniform" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RandomUniformInt.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RandomUniformInt.pbtxt index 35ad67cb371c9e..3b89715afcad98 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RandomUniformInt.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RandomUniformInt.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RandomUniformInt" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Range.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Range.pbtxt index 6c2023eb68e78c..306116f59bfd42 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Range.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Range.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Range" input_arg { name: "start" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RangeDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RangeDataset.pbtxt index 91fe787a7be594..9121cf0567e518 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RangeDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RangeDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RangeDataset" input_arg { name: "start" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Rank.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Rank.pbtxt index d44aeb9ec5fe4e..c12fd9a0abf07b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Rank.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Rank.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Rank" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReadFile.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReadFile.pbtxt index 8bf1beeb43a473..ce1985ec3c5cba 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReadFile.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReadFile.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReadFile" input_arg { name: "filename" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReadVariableOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReadVariableOp.pbtxt index 2123cfedd6cb68..5459632d58351e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReadVariableOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReadVariableOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReadVariableOp" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReadVariableXlaSplitND.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/ReadVariableXlaSplitND.pbtxt index be5364ceb1f1b5..f5216da5c96aac 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReadVariableXlaSplitND.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReadVariableXlaSplitND.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReadVariableXlaSplitND" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderNumRecordsProduced.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderNumRecordsProduced.pbtxt index 4514d3aac61611..50b1ea00da8f29 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderNumRecordsProduced.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderNumRecordsProduced.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderNumRecordsProduced" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderNumRecordsProducedV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderNumRecordsProducedV2.pbtxt index bff4305f3bfbb8..f560f01d443b08 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderNumRecordsProducedV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderNumRecordsProducedV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderNumRecordsProducedV2" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderNumWorkUnitsCompleted.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderNumWorkUnitsCompleted.pbtxt index 4ef2c1950f9507..b1e361e0119f22 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderNumWorkUnitsCompleted.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderNumWorkUnitsCompleted.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderNumWorkUnitsCompleted" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderNumWorkUnitsCompletedV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderNumWorkUnitsCompletedV2.pbtxt index 8f136052d08810..ee4c93e19bab64 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderNumWorkUnitsCompletedV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderNumWorkUnitsCompletedV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderNumWorkUnitsCompletedV2" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderRead.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderRead.pbtxt index fefc032a3460d9..b2a933892c4226 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderRead.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderRead.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderRead" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderReadUpTo.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderReadUpTo.pbtxt index 148095de5cae3a..e3bb64ec391d86 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderReadUpTo.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderReadUpTo.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderReadUpTo" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderReadUpToV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderReadUpToV2.pbtxt index f007588ebb608b..2ad62b16c9e928 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderReadUpToV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderReadUpToV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderReadUpToV2" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderReadV2.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/ReaderReadV2.pbtxt index 4e74f1b5b75080..3a1573147dcbe4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderReadV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderReadV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderReadV2" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderReset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderReset.pbtxt index 4379c400833e0f..9607f83c47016d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderReset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderReset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderReset" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderResetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderResetV2.pbtxt index eecd28bb726e79..56f862ae795184 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderResetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderResetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderResetV2" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderRestoreState.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderRestoreState.pbtxt index 1c5f71fe0f3521..717a5c34c8de93 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderRestoreState.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderRestoreState.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderRestoreState" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderRestoreStateV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderRestoreStateV2.pbtxt index 1b10902e081b7b..f75b04fc59d4e0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderRestoreStateV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderRestoreStateV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderRestoreStateV2" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderSerializeState.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderSerializeState.pbtxt index d3a95d06f33347..2f708cb8926695 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderSerializeState.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderSerializeState.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderSerializeState" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReaderSerializeStateV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReaderSerializeStateV2.pbtxt index 4946352509bcb4..c4ade1409fbacd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReaderSerializeStateV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReaderSerializeStateV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReaderSerializeStateV2" input_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Real.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Real.pbtxt index 0f326a3de147ed..d7e783ebe72f15 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Real.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Real.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Real" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RealDiv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RealDiv.pbtxt index a5a78f14cc7c5a..6f725e22c64473 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RealDiv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RealDiv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: 
"RealDiv" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RebatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RebatchDataset.pbtxt index bc70fcfadadb8e..176f94a3329c2b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RebatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RebatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RebatchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RebatchDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RebatchDatasetV2.pbtxt index fda65d7bfaba18..fd853b65b2685c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RebatchDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RebatchDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RebatchDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Reciprocal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Reciprocal.pbtxt index 35cab85ee2a898..7e03554871a4eb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Reciprocal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Reciprocal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Reciprocal" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReciprocalGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReciprocalGrad.pbtxt index eea1bbe1c8d667..8884c796da5e6e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReciprocalGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReciprocalGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReciprocalGrad" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RecordInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RecordInput.pbtxt index c353a17b318002..a72374420ee28e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RecordInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RecordInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RecordInput" output_arg { name: "records" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Recv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Recv.pbtxt index 87c7eb65ca87e8..e6717e57f8dedf 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Recv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Recv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Recv" output_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RecvTPUEmbeddingActivations.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RecvTPUEmbeddingActivations.pbtxt index 4cb9bd42ec045c..0fec828421f91b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RecvTPUEmbeddingActivations.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RecvTPUEmbeddingActivations.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RecvTPUEmbeddingActivations" output_arg { name: "outputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReduceDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReduceDataset.pbtxt index c98f1045e53af2..a1a17bc20c534d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReduceDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReduceDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReduceDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReduceJoin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReduceJoin.pbtxt index 3603cf38c0b098..28880fcfb06a99 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReduceJoin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReduceJoin.pbtxt @@ 
-1,4 +1,4 @@ -op { +op { name: "ReduceJoin" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RefEnter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RefEnter.pbtxt index 8c7921571d9d24..9af599d1b338bb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RefEnter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RefEnter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RefEnter" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RefExit.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RefExit.pbtxt index b13adfac00995a..1f9e84e7fade8b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RefExit.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RefExit.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RefExit" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RefIdentity.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RefIdentity.pbtxt index 4fa1105b6952d0..d2293fdf467a7b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RefIdentity.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RefIdentity.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RefIdentity" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RefMerge.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RefMerge.pbtxt index 31913cc794708d..fc4794d2f2f9d7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RefMerge.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RefMerge.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RefMerge" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RefNextIteration.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RefNextIteration.pbtxt index 453bce7c335929..d447a3a87b23e4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RefNextIteration.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RefNextIteration.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RefNextIteration" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RefSelect.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RefSelect.pbtxt index 688a34a5886fd4..aa2645f9ff17a3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RefSelect.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RefSelect.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RefSelect" input_arg { name: "index" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RefSwitch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RefSwitch.pbtxt index fba45dc4f98e00..6d12be2e5a8fa9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RefSwitch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RefSwitch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RefSwitch" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RegexFullMatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RegexFullMatch.pbtxt index 7b337477adb60d..f2c0b7b99f1c71 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RegexFullMatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RegexFullMatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RegexFullMatch" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RegexReplace.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RegexReplace.pbtxt index d8e7b8c143f4be..591773ce37416d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RegexReplace.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RegexReplace.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RegexReplace" input_arg { name: "input" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/RegisterDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RegisterDataset.pbtxt index 3cac6a9d2a0b6d..a144d96163e746 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RegisterDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RegisterDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RegisterDataset" input_arg { name: "dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RegisterDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RegisterDatasetV2.pbtxt index 3031d65c39b18b..1ebced2af8d75f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RegisterDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RegisterDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RegisterDatasetV2" input_arg { name: "dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Relayout.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Relayout.pbtxt index 118574bf66fa05..da96a32cf86245 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Relayout.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Relayout.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Relayout" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RelayoutLike.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RelayoutLike.pbtxt index df5b2f89a73424..7a5af4f919e1c9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RelayoutLike.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RelayoutLike.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RelayoutLike" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Relu.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Relu.pbtxt index 42ae6688c797ef..703fbbeff56f91 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Relu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Relu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Relu" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Relu6.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Relu6.pbtxt index 110c799682733e..311c3297a16411 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Relu6.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Relu6.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Relu6" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Relu6Grad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Relu6Grad.pbtxt index 5455dc384cca0c..618e13a2297acc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Relu6Grad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Relu6Grad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Relu6Grad" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReluGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReluGrad.pbtxt index 89c486883f26c7..b14f23bb30bddc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReluGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReluGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReluGrad" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RemoteCall.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RemoteCall.pbtxt index efe22174170af6..c6bc594510340c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RemoteCall.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RemoteCall.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RemoteCall" input_arg { name: "target" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RepeatDataset.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/RepeatDataset.pbtxt index f8f2cd8da0f50c..b7914feb4dfbc7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RepeatDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RepeatDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RepeatDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RequantizationRange.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RequantizationRange.pbtxt index 9ff7aa997d1a0c..6a489081eda2da 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RequantizationRange.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RequantizationRange.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RequantizationRange" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RequantizationRangePerChannel.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RequantizationRangePerChannel.pbtxt index f0352b164b215b..b621afb7a80b52 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RequantizationRangePerChannel.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RequantizationRangePerChannel.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RequantizationRangePerChannel" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Requantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Requantize.pbtxt index a142594170dc41..c04d32f19aeb16 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Requantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Requantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Requantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RequantizePerChannel.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RequantizePerChannel.pbtxt index 1e5b406fc0fb1e..3ed03fe12de763 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RequantizePerChannel.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RequantizePerChannel.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RequantizePerChannel" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Reshape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Reshape.pbtxt index fcbd49a8ce805f..e422ffa2470633 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Reshape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Reshape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Reshape" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResizeArea.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResizeArea.pbtxt index 315069f44fa499..ec861564fc4cc6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResizeArea.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResizeArea.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResizeArea" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResizeBicubic.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResizeBicubic.pbtxt index 2de5ed2e280f54..5e479a9432b876 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResizeBicubic.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResizeBicubic.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResizeBicubic" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResizeBicubicGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResizeBicubicGrad.pbtxt index b085c1d6f46f67..6de227d7e0fd8f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResizeBicubicGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResizeBicubicGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: 
"ResizeBicubicGrad" input_arg { name: "grads" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResizeBilinear.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResizeBilinear.pbtxt index ea43150a420c04..b991a8ad63ddc8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResizeBilinear.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResizeBilinear.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResizeBilinear" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResizeBilinearGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResizeBilinearGrad.pbtxt index a78a0b699d7072..79d1605fd1c4a8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResizeBilinearGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResizeBilinearGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResizeBilinearGrad" input_arg { name: "grads" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResizeNearestNeighbor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResizeNearestNeighbor.pbtxt index a61928b06f38ff..18d87c3583c856 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResizeNearestNeighbor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResizeNearestNeighbor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResizeNearestNeighbor" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResizeNearestNeighborGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResizeNearestNeighborGrad.pbtxt index 5bf5aa96eccc2b..001525b07337d6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResizeNearestNeighborGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResizeNearestNeighborGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResizeNearestNeighborGrad" input_arg { name: "grads" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorApplyGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorApplyGradient.pbtxt index 05ec4d8a404634..a1e03efd365a02 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorApplyGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorApplyGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceAccumulatorApplyGradient" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorNumAccumulated.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorNumAccumulated.pbtxt index 5773bd5cc85b18..398171da210bf4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorNumAccumulated.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorNumAccumulated.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceAccumulatorNumAccumulated" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorSetGlobalStep.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorSetGlobalStep.pbtxt index 902748a29f5836..3e9c5a29882859 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorSetGlobalStep.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorSetGlobalStep.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceAccumulatorSetGlobalStep" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorTakeGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorTakeGradient.pbtxt index 5f37567706f7f0..d690fd198e62bd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorTakeGradient.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/ResourceAccumulatorTakeGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceAccumulatorTakeGradient" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdaMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdaMax.pbtxt index ef87929c4f5eb6..3f307b70b7cac2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdaMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdaMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyAdaMax" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdadelta.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdadelta.pbtxt index 2a671e082a687f..f4c58b9860a033 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdadelta.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdadelta.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyAdadelta" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagrad.pbtxt index 67881c44ac854d..3c8e9d071a98b3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyAdagrad" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagradDA.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagradDA.pbtxt index 1bab7daf7aea46..996bc0b1a5cd71 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagradDA.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagradDA.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyAdagradDA" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagradV2.pbtxt index 66aaa456bedf82..09168ff5c4e03b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdagradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyAdagradV2" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdam.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdam.pbtxt index e76225de60701e..88cafe521d6800 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdam.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdam.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyAdam" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdamWithAmsgrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdamWithAmsgrad.pbtxt index ab8696a3263a89..ccd84e52d5a820 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdamWithAmsgrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAdamWithAmsgrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyAdamWithAmsgrad" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAddSign.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAddSign.pbtxt index 44f5b7d88e0c07..4653229b0aebf1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAddSign.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyAddSign.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyAddSign" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyCenteredRMSProp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyCenteredRMSProp.pbtxt index b7c69b1c832fff..f6fa5b2a351164 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyCenteredRMSProp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyCenteredRMSProp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyCenteredRMSProp" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyFtrl.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyFtrl.pbtxt index f94944686d5a14..f917b0fc22fa6f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyFtrl.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyFtrl.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyFtrl" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyFtrlV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyFtrlV2.pbtxt index 597ce4ab164aa0..d5586b7f73de74 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyFtrlV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyFtrlV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyFtrlV2" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyGradientDescent.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyGradientDescent.pbtxt index 6bd3170e2c6bde..83639c23ea1d7d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyGradientDescent.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyGradientDescent.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyGradientDescent" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyKerasMomentum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyKerasMomentum.pbtxt index 12e22d4167d7dc..737020cd744516 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyKerasMomentum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyKerasMomentum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyKerasMomentum" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyMomentum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyMomentum.pbtxt index 20de47ab2895d0..a6d0dd9243836e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyMomentum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyMomentum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyMomentum" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyPowerSign.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyPowerSign.pbtxt index 261463a5512584..5a7e103c707c04 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyPowerSign.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyPowerSign.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyPowerSign" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyProximalAdagrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyProximalAdagrad.pbtxt index 2c6007597ca3b1..c3adae4e5fc900 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyProximalAdagrad.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyProximalAdagrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyProximalAdagrad" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyProximalGradientDescent.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyProximalGradientDescent.pbtxt index dbe02a88ff079d..9c55be37dfc8da 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyProximalGradientDescent.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyProximalGradientDescent.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyProximalGradientDescent" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyRMSProp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyRMSProp.pbtxt index 90f24a83fb81e7..b0be4acdf19643 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyRMSProp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceApplyRMSProp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceApplyRMSProp" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceConditionalAccumulator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceConditionalAccumulator.pbtxt index 389486faef4e8e..cf3c150ccb9984 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceConditionalAccumulator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceConditionalAccumulator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceConditionalAccumulator" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceCountUpTo.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceCountUpTo.pbtxt index 3f07aa17a613e4..352935c2c167a4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceCountUpTo.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceCountUpTo.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceCountUpTo" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceGather.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceGather.pbtxt index 47a841492de645..9aa33d994c6cea 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceGather.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceGather.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceGather" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceGatherNd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceGatherNd.pbtxt index f8df557edbbdbb..04794f402cde00 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceGatherNd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceGatherNd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceGatherNd" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterAdd.pbtxt index 5b10cf3f16c0ce..a061da9db360bc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterAdd" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterDiv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterDiv.pbtxt index 0b94ef0dec43bd..d5b683c107ef47 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterDiv.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterDiv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterDiv" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMax.pbtxt index d6e97844047d95..d7f1e9c8a37e69 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterMax" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMin.pbtxt index d012a861218190..617f12b35c83a7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterMin" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMul.pbtxt index aa859ad4d252fe..313b2ed03c038b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterMul" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdAdd.pbtxt index 5cc51a5559e4a0..507b30eff9a2d6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterNdAdd" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdMax.pbtxt index 9933b54896edff..8a40ac6e842a03 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterNdMax" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdMin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdMin.pbtxt index bd0e7589184074..7567b43d39f025 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdMin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdMin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterNdMin" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdSub.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdSub.pbtxt index bd5785e83d4bf8..9d1a74daa931f8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdSub.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdSub.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterNdSub" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdUpdate.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdUpdate.pbtxt index 0b6ae92e036d84..4305163fc92280 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdUpdate.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterNdUpdate.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterNdUpdate" 
input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterSub.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterSub.pbtxt index 32257603e3aeef..3a267f9f63b289 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterSub.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterSub.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterSub" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterUpdate.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterUpdate.pbtxt index 320a7b0285e79d..55101b84c33871 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterUpdate.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceScatterUpdate.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceScatterUpdate" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdadelta.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdadelta.pbtxt index 21aa0addab9259..562e667d584b04 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdadelta.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdadelta.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyAdadelta" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagrad.pbtxt index 7cc9e1f5cd2927..4588dc668bd9b4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyAdagrad" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagradDA.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagradDA.pbtxt index c28ddc89e06058..e9ef6077dcedf7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagradDA.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagradDA.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyAdagradDA" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagradV2.pbtxt index a14ba1dc5d2d55..e0724d11142c5a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyAdagradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyAdagradV2" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyCenteredRMSProp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyCenteredRMSProp.pbtxt index 855c9982f3f6f5..6d02f81629efda 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyCenteredRMSProp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyCenteredRMSProp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyCenteredRMSProp" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyFtrl.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyFtrl.pbtxt index 7bb28e90106812..8ac38c09e514b4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyFtrl.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyFtrl.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyFtrl" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyFtrlV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyFtrlV2.pbtxt index c43d4a60f27998..cff68beaf1cbe7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyFtrlV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyFtrlV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyFtrlV2" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyKerasMomentum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyKerasMomentum.pbtxt index 96b22b6cf0bf4a..648d85a4989da1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyKerasMomentum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyKerasMomentum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyKerasMomentum" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyMomentum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyMomentum.pbtxt index 03b05570cb5ccd..ec3e5a96db592b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyMomentum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyMomentum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyMomentum" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyProximalAdagrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyProximalAdagrad.pbtxt index 0093fb2c50f763..59ae118cd9522d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyProximalAdagrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyProximalAdagrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyProximalAdagrad" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyProximalGradientDescent.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyProximalGradientDescent.pbtxt index 0844c8d434921c..246f442d27756e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyProximalGradientDescent.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyProximalGradientDescent.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyProximalGradientDescent" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyRMSProp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyRMSProp.pbtxt index 87803f3e89366f..8735a6fcacc18e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyRMSProp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceSparseApplyRMSProp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceSparseApplyRMSProp" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ResourceStridedSliceAssign.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ResourceStridedSliceAssign.pbtxt index 430195fab5c0d0..867f205958e76c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ResourceStridedSliceAssign.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ResourceStridedSliceAssign.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ResourceStridedSliceAssign" input_arg { name: "ref" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/Restore.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Restore.pbtxt index 269b27c0b71f59..1db029076032ac 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Restore.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Restore.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Restore" input_arg { name: "file_pattern" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RestoreSlice.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RestoreSlice.pbtxt index 74303c89f05152..03d2aa3bbf56ff 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RestoreSlice.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RestoreSlice.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RestoreSlice" input_arg { name: "file_pattern" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RestoreV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RestoreV2.pbtxt index 99319760e597e7..a88db314bc4906 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RestoreV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RestoreV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RestoreV2" input_arg { name: "prefix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveAllTPUEmbeddingParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveAllTPUEmbeddingParameters.pbtxt index ea305f05a813bb..226bed39734bf1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveAllTPUEmbeddingParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveAllTPUEmbeddingParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveAllTPUEmbeddingParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingADAMParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingADAMParameters.pbtxt index adfc5f50337be2..dfef40371c8098 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingADAMParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingADAMParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingADAMParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdadeltaParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdadeltaParameters.pbtxt index 3c514f34bde42c..f887714c78b20e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdadeltaParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdadeltaParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingAdadeltaParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt index 747aecc018ccc4..9a84dc7b0cc57f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdagradMomentumParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingAdagradMomentumParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdagradParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdagradParameters.pbtxt index cc9f172411d370..27e1aa441b60ae 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdagradParameters.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingAdagradParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingAdagradParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingCenteredRMSPropParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingCenteredRMSPropParameters.pbtxt index 6ce83985ca788f..afdcbd5cb5e7c9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingCenteredRMSPropParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingCenteredRMSPropParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingCenteredRMSPropParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingFTRLParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingFTRLParameters.pbtxt index d0d5fd74a5f8b1..161fde82db383b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingFTRLParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingFTRLParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingFTRLParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt index 1125ad0e594c68..633bf51cab0d68 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingFrequencyEstimatorParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingFrequencyEstimatorParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingMDLAdagradLightParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingMDLAdagradLightParameters.pbtxt index c2e4047eacfc4a..bf9d96bf37dea8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingMDLAdagradLightParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingMDLAdagradLightParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingMDLAdagradLightParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingMomentumParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingMomentumParameters.pbtxt index 6db5898c1e3a9a..3b8f98b755312f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingMomentumParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingMomentumParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingMomentumParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingProximalAdagradParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingProximalAdagradParameters.pbtxt index 5cf6dbfe8d767a..bd7c8288ff2af4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingProximalAdagradParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingProximalAdagradParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingProximalAdagradParameters" output_arg { name: "parameters" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingProximalYogiParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingProximalYogiParameters.pbtxt index dedd0ee1826811..a74c0caa0862ad 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingProximalYogiParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingProximalYogiParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingProximalYogiParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingRMSPropParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingRMSPropParameters.pbtxt index d4028e2d164b75..137d3bd497dd76 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingRMSPropParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingRMSPropParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingRMSPropParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingStochasticGradientDescentParameters.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingStochasticGradientDescentParameters.pbtxt index 44b3a038b1ca5e..da65f5ae98e542 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingStochasticGradientDescentParameters.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RetrieveTPUEmbeddingStochasticGradientDescentParameters.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RetrieveTPUEmbeddingStochasticGradientDescentParameters" output_arg { name: "parameters" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Reverse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Reverse.pbtxt index 9eced34f0a50d9..aecf71f072f0cd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Reverse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Reverse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Reverse" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReverseSequence.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReverseSequence.pbtxt index c3119a2fe49707..74d3601e1f4aba 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReverseSequence.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReverseSequence.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReverseSequence" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ReverseV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ReverseV2.pbtxt index fc9a980a8712bc..8cbc98e30a5390 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ReverseV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ReverseV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ReverseV2" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RewriteDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RewriteDataset.pbtxt index d831b7cac3f549..3e3e43b46fed65 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RewriteDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RewriteDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RewriteDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RightShift.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RightShift.pbtxt index 76bcdc4bd20b38..97257a019725a2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RightShift.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RightShift.pbtxt @@ -1,4 +1,4 @@ -op { +op 
{ name: "RightShift" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Rint.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Rint.pbtxt index f128a08a47a130..feed3bca0b3ef1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Rint.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Rint.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Rint" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscAbs.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscAbs.pbtxt index a1d64124f38587..2b3111bf580739 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscAbs.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscAbs.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscAbs" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscAdd.pbtxt index d98094d312b9ad..86cc81e964c101 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscAdd" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscBinaryArithmetic.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscBinaryArithmetic.pbtxt index f2919e4c26cbf9..9d5f080ed02465 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscBinaryArithmetic.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscBinaryArithmetic.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscBinaryArithmetic" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscBinaryComparison.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscBinaryComparison.pbtxt index 88731716d93946..d131476f8fe944 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscBinaryComparison.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscBinaryComparison.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscBinaryComparison" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscBitcast.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscBitcast.pbtxt index 738659831a958d..1d37369adec753 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscBitcast.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscBitcast.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscBitcast" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscBroadcast.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscBroadcast.pbtxt index 61318f39439e77..e81e8413dfb88b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscBroadcast.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscBroadcast.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscBroadcast" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscCast.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscCast.pbtxt index 2b82bc3ddc9939..344d0496b27962 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscCast.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscCast.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscCast" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscCeil.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscCeil.pbtxt index 904aca9f50b86d..ff1fefc0115839 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscCeil.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscCeil.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscCeil" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscCholesky.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/RiscCholesky.pbtxt index 3b8d59b6efac76..c6b24d107be7f3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscCholesky.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscCholesky.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscCholesky" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscConcat.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscConcat.pbtxt index 0b062957805f5f..889de347165b26 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscConcat.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscConcat.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscConcat" input_arg { name: "values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscCondition.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscCondition.pbtxt index a348796ece3143..859814c65905b2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscCondition.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscCondition.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscCondition" input_arg { name: "pred" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscConv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscConv.pbtxt index 06103b7729dab5..e9d326be8f286f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscConv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscConv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscConv" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscCos.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscCos.pbtxt index d89ad75fc9fa25..98cebc12c5a9ee 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscCos.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscCos.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscCos" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscDiv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscDiv.pbtxt index d2196f83d64b61..98aaf5ae97b8c5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscDiv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscDiv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscDiv" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscDot.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscDot.pbtxt index 55101191acb75f..0b3ff5c11e3646 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscDot.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscDot.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscDot" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscExp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscExp.pbtxt index 9775069c7f693e..4386db96ae9960 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscExp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscExp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscExp" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscFft.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscFft.pbtxt index de6960750f34ef..605cd7edd6e04e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscFft.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscFft.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscFft" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscFloor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscFloor.pbtxt index 71b73dca0fe261..55941ca352c2ec 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscFloor.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/RiscFloor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscFloor" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscGather.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscGather.pbtxt index 87681965ec7f3d..18d4ba3fdc90d4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscGather.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscGather.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscGather" input_arg { name: "params" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscImag.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscImag.pbtxt index c6e17d5296d93e..555ea9d071fe6a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscImag.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscImag.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscImag" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscIsFinite.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscIsFinite.pbtxt index 1b6574899d378d..19a4ae6617c0cb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscIsFinite.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscIsFinite.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscIsFinite" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscLog.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscLog.pbtxt index e52bc8fbe8ad02..23bbef3b07a3b0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscLog.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscLog.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscLog" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalAnd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalAnd.pbtxt index 04777105ffae7f..8bd4410a056174 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalAnd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalAnd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscLogicalAnd" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalNot.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalNot.pbtxt index d743f6c5f935bf..3496ef02e435a8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalNot.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalNot.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscLogicalNot" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalOr.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalOr.pbtxt index 5550a34f7d9bc1..3cf31921d5aa0c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalOr.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscLogicalOr.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscLogicalOr" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscMax.pbtxt index 9ecd924242589f..11c4517d7566b3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscMax" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscMin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscMin.pbtxt index 225a66f5f952de..7ac92ec5ca4a12 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscMin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscMin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscMin" input_arg { name: "x" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/RiscMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscMul.pbtxt index 787d58e56f074d..e55fca7a910539 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscMul" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscNeg.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscNeg.pbtxt index 2d4a2b3425a37c..429d94153c2490 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscNeg.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscNeg.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscNeg" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscPad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscPad.pbtxt index 3707792d018c3c..13ea65b0bd3974 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscPad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscPad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscPad" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscPool.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscPool.pbtxt index 766557c3b401f4..57847ff00d0cb4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscPool.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscPool.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscPool" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscPow.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscPow.pbtxt index 89d58894ce23f3..150c846fb0396f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscPow.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscPow.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscPow" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscRandomUniform.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscRandomUniform.pbtxt index 29261856fe30a6..2d3cd00a70c0ea 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscRandomUniform.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscRandomUniform.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscRandomUniform" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscReal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscReal.pbtxt index 5c299538003771..cf624262303984 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscReal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscReal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscReal" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscReduce.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscReduce.pbtxt index fc067edf50fa64..1dff780022c33e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscReduce.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscReduce.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscReduce" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscRem.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscRem.pbtxt index 5392ef47912f15..ffa8ff1bc61e82 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscRem.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscRem.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscRem" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscReshape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscReshape.pbtxt index a4508f5347d382..f686036497b850 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/RiscReshape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscReshape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscReshape" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscReverse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscReverse.pbtxt index 3ee303ebc4ea2c..60dec4d9b9d64b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscReverse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscReverse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscReverse" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscScatter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscScatter.pbtxt index d37788f996fb13..5def9d11dc486b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscScatter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscScatter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscScatter" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscShape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscShape.pbtxt index a9e95b87a8abd1..615e2b7e8bbba9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscShape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscShape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscShape" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscSign.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscSign.pbtxt index c3478d94634773..db7894468abf7b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscSign.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscSign.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscSign" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscSlice.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscSlice.pbtxt index afdc888c886ef2..b09072c5ae7f08 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscSlice.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscSlice.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscSlice" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscSort.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscSort.pbtxt index 60ca56bc40cab6..c49a6951bdbeff 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscSort.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscSort.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscSort" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscSqueeze.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscSqueeze.pbtxt index 0c70b1088f6aba..bf4e7123ddabc0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscSqueeze.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscSqueeze.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscSqueeze" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscSub.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscSub.pbtxt index 8a83934419e223..2590f9fa34cce6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscSub.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscSub.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscSub" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscTranspose.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscTranspose.pbtxt index eb7dce89a9aa35..856b0d67ffb98c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscTranspose.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscTranspose.pbtxt @@ -1,4 +1,4 @@ -op { +op { 
name: "RiscTranspose" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscTriangularSolve.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscTriangularSolve.pbtxt index c2095295f40a00..5b8518fc9bedd7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscTriangularSolve.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscTriangularSolve.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscTriangularSolve" input_arg { name: "matrix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscUnary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscUnary.pbtxt index d825acd378f0b8..0a7af35478abb0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscUnary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscUnary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscUnary" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RiscWhile.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RiscWhile.pbtxt index 9e4695be194b3f..8bb4745a1e135c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RiscWhile.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RiscWhile.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RiscWhile" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RngReadAndSkip.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RngReadAndSkip.pbtxt index d369d4dc9ae400..e64f5dd6d2680c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RngReadAndSkip.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RngReadAndSkip.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RngReadAndSkip" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RngSkip.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RngSkip.pbtxt index 6ef7c4b9ae69ea..dc3e9b948b7ed6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RngSkip.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RngSkip.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RngSkip" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Roll.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Roll.pbtxt index cfb7c101757c7b..ac81404fece17e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Roll.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Roll.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Roll" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Round.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Round.pbtxt index bb883d57b9a84e..c5685dc61439b1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Round.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Round.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Round" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Rsqrt.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Rsqrt.pbtxt index 87e8420b1e96f6..6d066c9e00cee9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Rsqrt.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Rsqrt.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Rsqrt" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/RsqrtGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/RsqrtGrad.pbtxt index 9298cb8a73e1dd..4509b1af361e3d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/RsqrtGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/RsqrtGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "RsqrtGrad" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SampleDistortedBoundingBox.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/SampleDistortedBoundingBox.pbtxt index 527d0b2f5000c6..95b4a2ddd5f95f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SampleDistortedBoundingBox.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SampleDistortedBoundingBox.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SampleDistortedBoundingBox" input_arg { name: "image_size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SampleDistortedBoundingBoxV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SampleDistortedBoundingBoxV2.pbtxt index 3600eaf126b48b..d857ee0a68795d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SampleDistortedBoundingBoxV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SampleDistortedBoundingBoxV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SampleDistortedBoundingBoxV2" input_arg { name: "image_size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SamplingDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SamplingDataset.pbtxt index 549b71b08c77fd..160a9e9bb16588 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SamplingDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SamplingDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SamplingDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Save.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Save.pbtxt index c815380c490ad8..c632730bd91e7e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Save.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Save.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Save" input_arg { name: "filename" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SaveDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SaveDataset.pbtxt index 8b5b8fcbd1dbc7..18f99fbe3591ee 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SaveDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SaveDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SaveDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SaveDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SaveDatasetV2.pbtxt index e20303ad3b8d21..9407de7812bb12 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SaveDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SaveDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SaveDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SaveSlices.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SaveSlices.pbtxt index 4415c13dbb8f69..306d67bd688456 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SaveSlices.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SaveSlices.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SaveSlices" input_arg { name: "filename" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SaveV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SaveV2.pbtxt index c91f4d3159d0d3..d9bae4c8b8e12b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SaveV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SaveV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SaveV2" input_arg { name: "prefix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScalarSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScalarSummary.pbtxt index 48fa50e9382251..bf4948076abe9c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScalarSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScalarSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScalarSummary" input_arg { name: "tags" 
diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScaleAndTranslate.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScaleAndTranslate.pbtxt index 4c3b0727ac80b0..516cca34539b9b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScaleAndTranslate.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScaleAndTranslate.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScaleAndTranslate" input_arg { name: "images" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScaleAndTranslateGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScaleAndTranslateGrad.pbtxt index 647f70653585b0..8eaa03c3933199 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScaleAndTranslateGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScaleAndTranslateGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScaleAndTranslateGrad" input_arg { name: "grads" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScanDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScanDataset.pbtxt index 7563a34c8c7df4..25de8c51a7a388 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScanDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScanDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScanDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterAdd.pbtxt index 70f2fc7f5ae9ae..0e47d683e05717 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterAdd" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterDiv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterDiv.pbtxt index c34776538354d3..2e75f22b0ed101 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterDiv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterDiv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterDiv" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterMax.pbtxt index 7ad8c98ea588fe..fe176e143b1874 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterMax" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterMin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterMin.pbtxt index e9dc8087a99349..7099d89f366c6a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterMin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterMin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterMin" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterMul.pbtxt index aa6f863e0bca77..149540b04aecc1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterMul" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterNd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterNd.pbtxt index 06877c844cbd08..75749c807b7368 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterNd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterNd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterNd" input_arg { name: "indices" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdAdd.pbtxt index 409f7d35e5f1e8..d093276f490609 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterNdAdd" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdMax.pbtxt index 1425d91fc0563a..2af2bf50069c0c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterNdMax" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdMin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdMin.pbtxt index 996fd4036e1cb5..c2a91a0ed313c8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdMin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdMin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterNdMin" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdNonAliasingAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdNonAliasingAdd.pbtxt index 5cf7d9f91f111b..eb8f48444b62ed 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdNonAliasingAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdNonAliasingAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterNdNonAliasingAdd" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdSub.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdSub.pbtxt index 6e85f0669efa5e..c9c73604d8ad20 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdSub.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdSub.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterNdSub" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdUpdate.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdUpdate.pbtxt index c3f7a023e18568..73def71d094156 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterNdUpdate.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterNdUpdate.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterNdUpdate" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterSub.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterSub.pbtxt index bf168e222834a8..9665d98a94e2d7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterSub.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterSub.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterSub" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ScatterUpdate.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ScatterUpdate.pbtxt index fc91f285899ac2..2f292734f17fb6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ScatterUpdate.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ScatterUpdate.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ScatterUpdate" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SdcaFprint.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SdcaFprint.pbtxt index 53ee0176bcf390..979c0016b3400d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SdcaFprint.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SdcaFprint.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SdcaFprint" input_arg { name: "input" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/SdcaOptimizer.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SdcaOptimizer.pbtxt index d13dd131607794..3746f9504ac61c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SdcaOptimizer.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SdcaOptimizer.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SdcaOptimizer" input_arg { name: "sparse_example_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SdcaOptimizerV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SdcaOptimizerV2.pbtxt index 2052c0c4a26703..cb16c8f55a1677 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SdcaOptimizerV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SdcaOptimizerV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SdcaOptimizerV2" input_arg { name: "sparse_example_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SdcaShrinkL1.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SdcaShrinkL1.pbtxt index 8bc28bda66d735..23d9fdd793a2e9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SdcaShrinkL1.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SdcaShrinkL1.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SdcaShrinkL1" input_arg { name: "weights" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SegmentMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SegmentMax.pbtxt index c137b5ce06d672..a1d5968f9dd7bc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SegmentMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SegmentMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SegmentMax" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SegmentMaxV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SegmentMaxV2.pbtxt index a2186b6d58c8cb..f61b9101812e8a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SegmentMaxV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SegmentMaxV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SegmentMaxV2" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SegmentMean.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SegmentMean.pbtxt index a5a4a423220e35..b3c5ead77d8510 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SegmentMean.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SegmentMean.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SegmentMean" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SegmentMin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SegmentMin.pbtxt index feae22be352cfd..bf87e8294e9421 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SegmentMin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SegmentMin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SegmentMin" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SegmentMinV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SegmentMinV2.pbtxt index 16fd710dac8b47..fe2e5396ff0dde 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SegmentMinV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SegmentMinV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SegmentMinV2" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SegmentProd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SegmentProd.pbtxt index 4726236438d1a9..8fbc39235bf0f7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SegmentProd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SegmentProd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SegmentProd" input_arg { name: "data" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/SegmentProdV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SegmentProdV2.pbtxt index a66300ef3e4230..9868f42e95da01 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SegmentProdV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SegmentProdV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SegmentProdV2" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SegmentSum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SegmentSum.pbtxt index 9f033b0db192b4..69289d5b7444b8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SegmentSum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SegmentSum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SegmentSum" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SegmentSumV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SegmentSumV2.pbtxt index f0bc5dce91d3b8..e11efffc4be916 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SegmentSumV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SegmentSumV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SegmentSumV2" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Select.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Select.pbtxt index c26b378e476155..38d00af197a978 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Select.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Select.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Select" input_arg { name: "condition" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SelectV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SelectV2.pbtxt index 0536e625bc1f15..a7c59f0d2a1778 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SelectV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SelectV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SelectV2" input_arg { name: "condition" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SelfAdjointEig.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SelfAdjointEig.pbtxt index c9bcf0815924a1..3657cc1bdc365d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SelfAdjointEig.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SelfAdjointEig.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SelfAdjointEig" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SelfAdjointEigV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SelfAdjointEigV2.pbtxt index 2e4b645639d6a6..8fbbfc961d7788 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SelfAdjointEigV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SelfAdjointEigV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SelfAdjointEigV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Selu.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Selu.pbtxt index d24219cff310f3..2acf579a5ca9a7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Selu.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Selu.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Selu" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SeluGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SeluGrad.pbtxt index d55750d1e75dd1..f96c7cbc158564 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SeluGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SeluGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SeluGrad" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Send.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Send.pbtxt index 
c2f8bd93e93c5c..73835060ddbcb2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Send.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Send.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Send" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SendTPUEmbeddingGradients.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SendTPUEmbeddingGradients.pbtxt index 0794e1c7e5c448..f6c486fe542bef 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SendTPUEmbeddingGradients.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SendTPUEmbeddingGradients.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SendTPUEmbeddingGradients" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SerializeIterator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SerializeIterator.pbtxt index ba098cea894104..262c9def883fca 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SerializeIterator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SerializeIterator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SerializeIterator" input_arg { name: "resource_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SerializeManySparse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SerializeManySparse.pbtxt index cb0c4c86419eec..9e741634385c16 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SerializeManySparse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SerializeManySparse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SerializeManySparse" input_arg { name: "sparse_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SerializeSparse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SerializeSparse.pbtxt index b9a18bcf35107c..5040d77fc2f407 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SerializeSparse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SerializeSparse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SerializeSparse" input_arg { name: "sparse_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SerializeTensor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SerializeTensor.pbtxt index c739dc6a8330f3..4d7b5cf5766745 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SerializeTensor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SerializeTensor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SerializeTensor" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SetSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SetSize.pbtxt index 38c8b19d19dee3..185e5e09734e96 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SetSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SetSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SetSize" input_arg { name: "set_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SetStatsAggregatorDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SetStatsAggregatorDataset.pbtxt index cf7b57075cfa14..fa2dfd389adb8c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SetStatsAggregatorDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SetStatsAggregatorDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SetStatsAggregatorDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Shape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Shape.pbtxt index c679caa24aad13..371bb9ef2fde71 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Shape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Shape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Shape" input_arg { name: "input" diff 
--git a/tensorflow/core/ops/compat/ops_history_v2/ShapeN.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShapeN.pbtxt index 04be1dd59c613c..15e9f11c0f8d3b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShapeN.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShapeN.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShapeN" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShardDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShardDataset.pbtxt index 2c08879ca68dd0..c23a4d3d2e3f98 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShardDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShardDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShardDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShardedFilename.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShardedFilename.pbtxt index df6d834f1f8c1e..cf46ffdbd78d54 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShardedFilename.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShardedFilename.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShardedFilename" input_arg { name: "basename" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShardedFilespec.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShardedFilespec.pbtxt index 7c2e0806b5ec58..7d1badcf09e83b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShardedFilespec.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShardedFilespec.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShardedFilespec" input_arg { name: "basename" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDataset.pbtxt index ca111a93648d73..195d66b2ab8956 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShuffleAndRepeatDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDatasetV2.pbtxt index 2ac2e838c3db86..1d22404cf064e0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShuffleAndRepeatDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShuffleDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShuffleDataset.pbtxt index e1b5ff3283e57a..35c0aa70c11696 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShuffleDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShuffleDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShuffleDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShuffleDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShuffleDatasetV2.pbtxt index 5dce75878aa05f..9ec7fa282d6307 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShuffleDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShuffleDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShuffleDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShuffleDatasetV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShuffleDatasetV3.pbtxt index ea30455a9f4acd..e037b818d4ffe1 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/ShuffleDatasetV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShuffleDatasetV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShuffleDatasetV3" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShutdownDistributedTPU.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShutdownDistributedTPU.pbtxt index 5784b4418c3112..9e60b7f507554b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShutdownDistributedTPU.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShutdownDistributedTPU.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShutdownDistributedTPU" is_stateful: true } diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShutdownTPUSystem.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShutdownTPUSystem.pbtxt index df9e4f58f57dcf..ddddeddc63a18e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShutdownTPUSystem.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShutdownTPUSystem.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ShutdownTPUSystem" output_arg { name: "success" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Sigmoid.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Sigmoid.pbtxt index 16f433a337c378..dee59f6fa02f41 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Sigmoid.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Sigmoid.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Sigmoid" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SigmoidGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SigmoidGrad.pbtxt index 20c59f8f9311e6..788c3385098097 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SigmoidGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SigmoidGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SigmoidGrad" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Sign.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Sign.pbtxt index ee388e8ae2a4bf..07cb519996650f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Sign.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Sign.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Sign" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Sin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Sin.pbtxt index 6a3398c8da6e69..f6122e6e30b1f6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Sin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Sin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Sin" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Sinh.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Sinh.pbtxt index 0ff80863b11fda..7225234c7edcdf 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Sinh.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Sinh.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Sinh" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Size.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Size.pbtxt index 2c5e61f1496822..db039e4254ced1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Size.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Size.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Size" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SkipDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SkipDataset.pbtxt index e46a083e7dac16..07e0cf257f87ce 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SkipDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SkipDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: 
"SkipDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Skipgram.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Skipgram.pbtxt index 3734477985b239..d31bc826301db8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Skipgram.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Skipgram.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Skipgram" output_arg { name: "vocab_word" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SleepDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SleepDataset.pbtxt index c9cdd0432da162..0a1d637995e146 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SleepDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SleepDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SleepDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Slice.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Slice.pbtxt index 0d66369807d6ae..ced3fb6e0f0886 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Slice.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Slice.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Slice" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SlidingWindowDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SlidingWindowDataset.pbtxt index d3361f9d45b76e..ab63899bf4fb51 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SlidingWindowDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SlidingWindowDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SlidingWindowDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Snapshot.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Snapshot.pbtxt index c1fb2d69ed9253..aea213f7c50b1f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Snapshot.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Snapshot.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Snapshot" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SnapshotChunkDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SnapshotChunkDataset.pbtxt index 339bcd9980923d..e20fbcefee50e0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SnapshotChunkDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SnapshotChunkDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SnapshotChunkDataset" input_arg { name: "chunk_file" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SnapshotDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SnapshotDataset.pbtxt index b535c43a80371c..6d9002761ae02f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SnapshotDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SnapshotDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SnapshotDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SnapshotDatasetReader.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SnapshotDatasetReader.pbtxt index 7a9354125604db..d59d8edf2fc492 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SnapshotDatasetReader.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SnapshotDatasetReader.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SnapshotDatasetReader" input_arg { name: "shard_dir" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SnapshotDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SnapshotDatasetV2.pbtxt index e8636e3537e451..c9e244ed0e9099 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SnapshotDatasetV2.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/SnapshotDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SnapshotDatasetV2" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SnapshotNestedDatasetReader.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SnapshotNestedDatasetReader.pbtxt index 60728b657a6dd4..078460bcb23930 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SnapshotNestedDatasetReader.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SnapshotNestedDatasetReader.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SnapshotNestedDatasetReader" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SobolSample.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SobolSample.pbtxt index 16377290c6194d..4fe7c45282a15f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SobolSample.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SobolSample.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SobolSample" input_arg { name: "dim" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Softmax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Softmax.pbtxt index 886e1dcb7168c8..03f499777eef54 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Softmax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Softmax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Softmax" input_arg { name: "logits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SoftmaxCrossEntropyWithLogits.pbtxt index 4b258297d20a27..8ac8052e30ffac 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SoftmaxCrossEntropyWithLogits.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SoftmaxCrossEntropyWithLogits.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SoftmaxCrossEntropyWithLogits" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Softplus.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Softplus.pbtxt index 66a82cdfd19bb1..3757e8d75039a8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Softplus.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Softplus.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Softplus" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SoftplusGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SoftplusGrad.pbtxt index 20ed580575492b..331b1abbf371f0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SoftplusGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SoftplusGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SoftplusGrad" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Softsign.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Softsign.pbtxt index 99ac45e08ba465..c83bc9929ab275 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Softsign.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Softsign.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Softsign" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SoftsignGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SoftsignGrad.pbtxt index da12fc0333ef5a..5411f9b5187758 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SoftsignGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SoftsignGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SoftsignGrad" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SortListOfSparseCoreCooTensors.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/SortListOfSparseCoreCooTensors.pbtxt index 209998f61f0bd8..b86af96551b108 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SortListOfSparseCoreCooTensors.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SortListOfSparseCoreCooTensors.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SortListOfSparseCoreCooTensors" input_arg { name: "row_ids_list" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SpaceToBatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SpaceToBatch.pbtxt index 6a3a33227e6b4f..155e1b3a985e44 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SpaceToBatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SpaceToBatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SpaceToBatch" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SpaceToBatchND.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SpaceToBatchND.pbtxt index 3b0379dbec0ae4..c38026e5cde09c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SpaceToBatchND.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SpaceToBatchND.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SpaceToBatchND" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SpaceToDepth.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SpaceToDepth.pbtxt index 3abc14c711a049..c7dd03ea1041fc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SpaceToDepth.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SpaceToDepth.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SpaceToDepth" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseAccumulatorApplyGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseAccumulatorApplyGradient.pbtxt index cbd19b930643ff..7973ae8b558156 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseAccumulatorApplyGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseAccumulatorApplyGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseAccumulatorApplyGradient" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseAccumulatorTakeGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseAccumulatorTakeGradient.pbtxt index 5a35297ec764e3..1aaa40667ff84a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseAccumulatorTakeGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseAccumulatorTakeGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseAccumulatorTakeGradient" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseAdd.pbtxt index 84b00a504d9f9d..06122b12980987 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseAdd" input_arg { name: "a_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseAddGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseAddGrad.pbtxt index 96192d0094101c..5c5e9aa0118f89 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseAddGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseAddGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseAddGrad" input_arg { name: "backprop_val_grad" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdadelta.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdadelta.pbtxt index abff5aab28cbc4..5cd7caf5d819d7 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdadelta.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdadelta.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyAdadelta" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagrad.pbtxt index 248c28c0b8e9e7..3e2e873c9ab59d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyAdagrad" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagradDA.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagradDA.pbtxt index 8dcc79dc49aa9c..e47c75ddc6cfa1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagradDA.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagradDA.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyAdagradDA" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagradV2.pbtxt index 00a3ca86f43359..2b5360f68b6ae1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyAdagradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyAdagradV2" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyCenteredRMSProp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyCenteredRMSProp.pbtxt index a5ae87d42c445a..8f958ce67f413a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyCenteredRMSProp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyCenteredRMSProp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyCenteredRMSProp" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyFtrl.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyFtrl.pbtxt index d43fe26ffc1a11..17f289a6bbba7f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyFtrl.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyFtrl.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyFtrl" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyFtrlV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyFtrlV2.pbtxt index 4ced4fe22d596a..1d7b1cab4169df 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyFtrlV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyFtrlV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyFtrlV2" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyMomentum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyMomentum.pbtxt index 17e60ae80df5eb..ffabce2fb7af25 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyMomentum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyMomentum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyMomentum" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyProximalAdagrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyProximalAdagrad.pbtxt index 5fb249592fba4f..c2a7440e3dcacb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyProximalAdagrad.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyProximalAdagrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyProximalAdagrad" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyProximalGradientDescent.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyProximalGradientDescent.pbtxt index f04e6553369cc9..525119034a7653 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyProximalGradientDescent.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyProximalGradientDescent.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyProximalGradientDescent" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyRMSProp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyRMSProp.pbtxt index aa27af72a44866..f5f7725c0e95e8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseApplyRMSProp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseApplyRMSProp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseApplyRMSProp" input_arg { name: "var" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseBincount.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseBincount.pbtxt index 9739aa7f2588de..9bbc5132845f1f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseBincount.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseBincount.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseBincount" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseConcat.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseConcat.pbtxt index 640957dd1750a5..ac291f4acbacce 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseConcat.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseConcat.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseConcat" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseConditionalAccumulator.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseConditionalAccumulator.pbtxt index 26bc9fa77b61d6..59def3f130ef21 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseConditionalAccumulator.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseConditionalAccumulator.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseConditionalAccumulator" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseCountSparseOutput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseCountSparseOutput.pbtxt index d80e465d0205ba..ed79733f97fcdf 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseCountSparseOutput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseCountSparseOutput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseCountSparseOutput" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseCross.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseCross.pbtxt index 81bd4101693f0e..f25372f5808567 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseCross.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseCross.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseCross" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseCrossHashed.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseCrossHashed.pbtxt index b0be6ee5dbcc64..73002a92f24850 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseCrossHashed.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseCrossHashed.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: 
"SparseCrossHashed" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseCrossV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseCrossV2.pbtxt index e2a3e7053512c3..206542e4713902 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseCrossV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseCrossV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseCrossV2" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseAdd.pbtxt index 474457187b36bf..ca71405a4c9f61 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseDenseCwiseAdd" input_arg { name: "sp_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseDiv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseDiv.pbtxt index d91c4c89585002..fe4b629ad1cf5b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseDiv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseDiv.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseDenseCwiseDiv" input_arg { name: "sp_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseMul.pbtxt index f6fd9e956c0884..80f7245d66d1bb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseDenseCwiseMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseDenseCwiseMul" input_arg { name: "sp_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseFillEmptyRows.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseFillEmptyRows.pbtxt index a93278ac57811e..d99257aa7103af 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseFillEmptyRows.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseFillEmptyRows.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseFillEmptyRows" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseFillEmptyRowsGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseFillEmptyRowsGrad.pbtxt index af97bf3745301b..87f1c5c4e2d1e3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseFillEmptyRowsGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseFillEmptyRowsGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseFillEmptyRowsGrad" input_arg { name: "reverse_index_map" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatMul.pbtxt index e3a0ab25bb5f8d..d1eaa6a5edcb98 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatMul" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixAdd.pbtxt index 1a87d1beedee98..3a9efffdae686c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixAdd" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixMatMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixMatMul.pbtxt 
index db0ded9e1f7cae..a38613747717c9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixMatMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixMatMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixMatMul" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixMul.pbtxt index 1ed54fe50f9f58..649992a0298912 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixMul" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixNNZ.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixNNZ.pbtxt index 2073ae629c54cd..40363327c68c83 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixNNZ.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixNNZ.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixNNZ" input_arg { name: "sparse_matrix" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixOrderingAMD.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixOrderingAMD.pbtxt index cfa3dda19d4b26..b851bde928900d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixOrderingAMD.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixOrderingAMD.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixOrderingAMD" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSoftmax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSoftmax.pbtxt index 63579485eae114..d994082a1b3e9e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSoftmax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSoftmax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixSoftmax" input_arg { name: "logits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSoftmaxGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSoftmaxGrad.pbtxt index 133ffbbfc1047c..3bb68d7797d134 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSoftmaxGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSoftmaxGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixSoftmaxGrad" input_arg { name: "softmax" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSparseCholesky.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSparseCholesky.pbtxt index f14d9834fa04ef..1f74136d3a480e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSparseCholesky.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSparseCholesky.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixSparseCholesky" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSparseMatMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSparseMatMul.pbtxt index 51afd8da1a2ec7..3726d6bd8c0a4d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSparseMatMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixSparseMatMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixSparseMatMul" input_arg { name: "a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixTranspose.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixTranspose.pbtxt index ea071da4e77177..be6bcd6dc1e496 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixTranspose.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixTranspose.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixTranspose" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixZeros.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixZeros.pbtxt index 87b09ffd306f2c..6d743e24a0ad4c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixZeros.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseMatrixZeros.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseMatrixZeros" input_arg { name: "dense_shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseReduceMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseReduceMax.pbtxt index 86a4d1e0046906..4df1254af7460d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseReduceMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseReduceMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseReduceMax" input_arg { name: "input_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseReduceMaxSparse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseReduceMaxSparse.pbtxt index c44461c9d090be..81896440ab75a9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseReduceMaxSparse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseReduceMaxSparse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseReduceMaxSparse" input_arg { name: "input_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseReduceSum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseReduceSum.pbtxt index d0e5258dad8801..9f5a2d2e2be133 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseReduceSum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseReduceSum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseReduceSum" input_arg { name: "input_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseReduceSumSparse.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseReduceSumSparse.pbtxt index 12a18fbda1c045..b554e7ca6a00f6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseReduceSumSparse.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseReduceSumSparse.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseReduceSumSparse" input_arg { name: "input_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseReorder.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseReorder.pbtxt index 9a9bd24e406c00..5c5ad9078385e6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseReorder.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseReorder.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseReorder" input_arg { name: "input_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseReshape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseReshape.pbtxt index ab9f157b09f23f..934b5010a1e415 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseReshape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseReshape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseReshape" input_arg { name: "input_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMean.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMean.pbtxt index 6f56f3f5364548..0891aa6b0175ec 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMean.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMean.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: 
"SparseSegmentMean" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanGrad.pbtxt index a75fd742bf4e91..54e44b3e62d64b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSegmentMeanGrad" input_arg { name: "grad" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanGradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanGradV2.pbtxt index b02a4838190c33..e1282b858a90ec 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanGradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanGradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSegmentMeanGradV2" input_arg { name: "grad" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanWithNumSegments.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanWithNumSegments.pbtxt index 930da74b96b061..a27a2efc560465 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanWithNumSegments.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentMeanWithNumSegments.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSegmentMeanWithNumSegments" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtN.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtN.pbtxt index feec752c94d180..b7a209c48d9ee6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtN.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtN.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSegmentSqrtN" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNGrad.pbtxt index 18397fbb9cda45..d9a3c930a122de 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSegmentSqrtNGrad" input_arg { name: "grad" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNGradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNGradV2.pbtxt index 1d8a3b4c921189..9436be8beb8e17 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNGradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNGradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSegmentSqrtNGradV2" input_arg { name: "grad" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNWithNumSegments.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNWithNumSegments.pbtxt index 5aa63ca35fcace..b67358819acdc6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNWithNumSegments.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSqrtNWithNumSegments.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSegmentSqrtNWithNumSegments" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSum.pbtxt index db9042011a6f8d..5962058393cf14 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSum.pbtxt @@ -1,4 
+1,4 @@ -op { +op { name: "SparseSegmentSum" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumGrad.pbtxt index e441e7f0a60f69..ce9742f898ef6b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSegmentSumGrad" input_arg { name: "grad" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumGradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumGradV2.pbtxt index 4643278f47fad1..3baf45183e9464 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumGradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumGradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSegmentSumGradV2" input_arg { name: "grad" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumWithNumSegments.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumWithNumSegments.pbtxt index 55674b28bbae69..4b8183aeeeabcd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumWithNumSegments.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSegmentSumWithNumSegments.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSegmentSumWithNumSegments" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSlice.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSlice.pbtxt index 60d914313b3da7..a6434cbfa71d2c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSlice.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSlice.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSlice" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSliceGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSliceGrad.pbtxt index d3d6693044a564..77134f37194b5a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSliceGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSliceGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSliceGrad" input_arg { name: "backprop_val_grad" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSoftmax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSoftmax.pbtxt index 003ea791ed61f0..5e447367f3be6d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSoftmax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSoftmax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSoftmax" input_arg { name: "sp_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSoftmaxCrossEntropyWithLogits.pbtxt index 884008f4ea9937..57d8f4c4662535 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSoftmaxCrossEntropyWithLogits.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSoftmaxCrossEntropyWithLogits.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSoftmaxCrossEntropyWithLogits" input_arg { name: "features" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSparseMaximum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSparseMaximum.pbtxt index c1bb9cb237fb35..bdd017c2252867 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSparseMaximum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSparseMaximum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSparseMaximum" input_arg { name: 
"a_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSparseMinimum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSparseMinimum.pbtxt index 7d1569221cce76..b600e95b3ae3bb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSparseMinimum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSparseMinimum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSparseMinimum" input_arg { name: "a_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseSplit.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseSplit.pbtxt index af8deac1a17908..997b2b21abdd44 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseSplit.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseSplit.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseSplit" input_arg { name: "split_dim" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseTensorDenseAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseTensorDenseAdd.pbtxt index c1b647cef05e96..8e95e67610204f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseTensorDenseAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseTensorDenseAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseTensorDenseAdd" input_arg { name: "a_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseTensorDenseMatMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseTensorDenseMatMul.pbtxt index 8fa6733bfc1b91..ce66c5306a5c2a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseTensorDenseMatMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseTensorDenseMatMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseTensorDenseMatMul" input_arg { name: "a_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseTensorSliceDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseTensorSliceDataset.pbtxt index bc7b81876d1c4e..af26fd8c180a3f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseTensorSliceDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseTensorSliceDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseTensorSliceDataset" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseTensorToCSRSparseMatrix.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseTensorToCSRSparseMatrix.pbtxt index a1215c2000ae1e..b45376fcd00d3c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseTensorToCSRSparseMatrix.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseTensorToCSRSparseMatrix.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseTensorToCSRSparseMatrix" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseToDense.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseToDense.pbtxt index 7e693267c06d2a..351603424e6ccd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseToDense.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseToDense.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseToDense" input_arg { name: "sparse_indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SparseToSparseSetOperation.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SparseToSparseSetOperation.pbtxt index bac3763b0510a7..a7775a2f24a465 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SparseToSparseSetOperation.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SparseToSparseSetOperation.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SparseToSparseSetOperation" input_arg { name: "set1_indices" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/Spence.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Spence.pbtxt index fefe73924fda23..7032cac3dce437 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Spence.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Spence.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Spence" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Split.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Split.pbtxt index 3d69a43f2e71ec..49428f7e5ce590 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Split.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Split.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Split" input_arg { name: "split_dim" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SplitDedupData.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SplitDedupData.pbtxt index 53d3ccdf1c6a6f..ada9d479888933 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SplitDedupData.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SplitDedupData.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SplitDedupData" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SplitV.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SplitV.pbtxt index 577d8a604fac7b..706d1c3c81c729 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SplitV.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SplitV.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SplitV" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SqlDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SqlDataset.pbtxt index fb233f139edaa7..68af0ac17eff32 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SqlDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SqlDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SqlDataset" input_arg { name: "driver_name" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Sqrt.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Sqrt.pbtxt index 371520ff6ae795..3c566b98b0dbff 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Sqrt.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Sqrt.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Sqrt" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SqrtGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SqrtGrad.pbtxt index 055baa35e98f93..d738e2023ffc24 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SqrtGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SqrtGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SqrtGrad" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Square.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Square.pbtxt index 52c0e31486d601..7501f29f1d95e4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Square.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Square.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Square" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SquaredDifference.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SquaredDifference.pbtxt index a680e0087f531d..29ea33c95e2d2a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SquaredDifference.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SquaredDifference.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SquaredDifference" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Squeeze.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Squeeze.pbtxt index d3733f99e07910..54335545f4807f 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/Squeeze.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Squeeze.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Squeeze" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Stack.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Stack.pbtxt index e4398a4783de8f..e8e459cfe2c08d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Stack.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Stack.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Stack" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StackClose.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StackClose.pbtxt index dd2b8efd9c95ab..8c916ab52a2c69 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StackClose.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StackClose.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StackClose" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StackCloseV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StackCloseV2.pbtxt index 66ddab1d90bed8..18c5934b0d7961 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StackCloseV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StackCloseV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StackCloseV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StackPop.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StackPop.pbtxt index 53965ff252d508..80e3ef79d09c61 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StackPop.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StackPop.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StackPop" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StackPopV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StackPopV2.pbtxt index 77300ed42e46f9..438d52b8ea5625 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StackPopV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StackPopV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StackPopV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StackPush.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StackPush.pbtxt index f2052fe41c1ab4..44fae0ce455f0c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StackPush.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StackPush.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StackPush" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StackPushV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StackPushV2.pbtxt index 6b3f8b5655b2a3..7149b4fda435c6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StackPushV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StackPushV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StackPushV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StackV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StackV2.pbtxt index 5d934a7b8bb00b..606361dd26fdb1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StackV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StackV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StackV2" input_arg { name: "max_size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Stage.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Stage.pbtxt index d7b0fbc1b9a4b7..8a64d696118f7f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Stage.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Stage.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Stage" input_arg { name: 
"values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StageClear.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StageClear.pbtxt index 7ce3219c586d58..1f43cdb901967d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StageClear.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StageClear.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StageClear" attr { name: "capacity" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StagePeek.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StagePeek.pbtxt index e9e6c68ed56344..a7397c488167fe 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StagePeek.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StagePeek.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StagePeek" input_arg { name: "index" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StageSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StageSize.pbtxt index b9bb218b6909aa..6f22fd3d032706 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StageSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StageSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StageSize" output_arg { name: "size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatefulPartitionedCall.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatefulPartitionedCall.pbtxt index a069806185b334..7d411c155ab2b9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatefulPartitionedCall.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatefulPartitionedCall.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatefulPartitionedCall" input_arg { name: "args" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatefulRandomBinomial.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatefulRandomBinomial.pbtxt index 798854feef481b..97eb7d4e8f0fa0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatefulRandomBinomial.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatefulRandomBinomial.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatefulRandomBinomial" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatefulStandardNormal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatefulStandardNormal.pbtxt index 4738b843a0b92a..44ef92c5a8f7ed 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatefulStandardNormal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatefulStandardNormal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatefulStandardNormal" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatefulStandardNormalV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatefulStandardNormalV2.pbtxt index 76e31871a6bbae..1b99b2320b0264 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatefulStandardNormalV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatefulStandardNormalV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatefulStandardNormalV2" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatefulTruncatedNormal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatefulTruncatedNormal.pbtxt index 8ef450390d5367..e74de4f0fceb55 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatefulTruncatedNormal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatefulTruncatedNormal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatefulTruncatedNormal" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatefulUniform.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatefulUniform.pbtxt index fef8635b75cb94..fd2b87c6e45988 100644 
--- a/tensorflow/core/ops/compat/ops_history_v2/StatefulUniform.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatefulUniform.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatefulUniform" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatefulUniformFullInt.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatefulUniformFullInt.pbtxt index 0536cdf62ce4cc..35ab70e0f3a1e2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatefulUniformFullInt.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatefulUniformFullInt.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatefulUniformFullInt" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatefulUniformInt.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatefulUniformInt.pbtxt index cd71c24e2762c1..06f62faaace7b6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatefulUniformInt.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatefulUniformInt.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatefulUniformInt" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessCase.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessCase.pbtxt index 367c9ddfdb9602..174c00e5c8ab4a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessCase.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessCase.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessCase" input_arg { name: "branch_index" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessIf.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessIf.pbtxt index a38480703ef14a..6eda6df052a58b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessIf.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessIf.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessIf" input_arg { name: "cond" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessMultinomial.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessMultinomial.pbtxt index 3a06bef6ef7747..16dac7dfd152bb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessMultinomial.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessMultinomial.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessMultinomial" input_arg { name: "logits" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessParameterizedTruncatedNormal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessParameterizedTruncatedNormal.pbtxt index 86509705c4f188..598125677b114a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessParameterizedTruncatedNormal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessParameterizedTruncatedNormal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessParameterizedTruncatedNormal" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomBinomial.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomBinomial.pbtxt index da4c351d221a53..1ba7e5c119b147 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomBinomial.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomBinomial.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomBinomial" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGammaV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGammaV2.pbtxt index 56d573cf2ec6d9..b3988b1f407d96 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGammaV2.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGammaV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomGammaV2" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGammaV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGammaV3.pbtxt index d2fbd60387e24d..00d9da80c101e1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGammaV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGammaV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomGammaV3" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetAlg.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetAlg.pbtxt index ff50f6fad96a1c..522fce59c88a05 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetAlg.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetAlg.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomGetAlg" output_arg { name: "alg" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetKeyCounter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetKeyCounter.pbtxt index 7532f9b2f6ffca..0d9c547181bb61 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetKeyCounter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetKeyCounter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomGetKeyCounter" input_arg { name: "seed" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetKeyCounterAlg.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetKeyCounterAlg.pbtxt index 6ed78cbec62348..4897ee80bb82d5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetKeyCounterAlg.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomGetKeyCounterAlg.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomGetKeyCounterAlg" input_arg { name: "seed" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomNormal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomNormal.pbtxt index 0de87cb2569ce2..804d904c148234 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomNormal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomNormal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomNormal" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomNormalV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomNormalV2.pbtxt index e5a48f3116056e..dac945afe5354e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomNormalV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomNormalV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomNormalV2" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomPoisson.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomPoisson.pbtxt index 575f54b2e22617..525b933d8b005b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomPoisson.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomPoisson.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomPoisson" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniform.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniform.pbtxt index d27528775e2460..22a5b25466b90f 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniform.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniform.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomUniform" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformFullInt.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformFullInt.pbtxt index ede922b448a4de..a9d652634d7cb7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformFullInt.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformFullInt.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomUniformFullInt" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformFullIntV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformFullIntV2.pbtxt index 5a5bd12f262c5d..d4511c5447bda5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformFullIntV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformFullIntV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomUniformFullIntV2" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformInt.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformInt.pbtxt index 13014cca798e16..834a6fd5ad3983 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformInt.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformInt.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomUniformInt" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformIntV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformIntV2.pbtxt index 96bc39656a4b17..be4ed6072852be 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformIntV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformIntV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomUniformIntV2" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformV2.pbtxt index 9434be69beff1f..f66ee72bd4af30 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessRandomUniformV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessRandomUniformV2" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessSampleDistortedBoundingBox.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessSampleDistortedBoundingBox.pbtxt index 6c322d848ac079..6858a110cf46f5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessSampleDistortedBoundingBox.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessSampleDistortedBoundingBox.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessSampleDistortedBoundingBox" input_arg { name: "image_size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessShuffle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessShuffle.pbtxt index 06b6eb02eac58d..eab3565990135e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessShuffle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessShuffle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessShuffle" input_arg { name: "value" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/StatelessTruncatedNormal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessTruncatedNormal.pbtxt index a301c48afbe058..c8c8d850341f5f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessTruncatedNormal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessTruncatedNormal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessTruncatedNormal" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessTruncatedNormalV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessTruncatedNormalV2.pbtxt index f074ea7423b525..23f886f104dd24 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessTruncatedNormalV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessTruncatedNormalV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessTruncatedNormalV2" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatelessWhile.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatelessWhile.pbtxt index dc0b6353373c65..28579edbde5e67 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatelessWhile.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatelessWhile.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatelessWhile" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StaticRegexFullMatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StaticRegexFullMatch.pbtxt index d053ad6e1db6f6..be6078c102232d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StaticRegexFullMatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StaticRegexFullMatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StaticRegexFullMatch" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StaticRegexReplace.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StaticRegexReplace.pbtxt index e570f0de09ff55..fe3eb69a1a0044 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StaticRegexReplace.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StaticRegexReplace.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StaticRegexReplace" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorHandle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorHandle.pbtxt index 45080da97423ca..2d55e00492fddc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorHandle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorHandle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatsAggregatorHandle" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorHandleV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorHandleV2.pbtxt index 0fca6c398316bf..7dc361e958d794 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorHandleV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorHandleV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatsAggregatorHandleV2" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorSetSummaryWriter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorSetSummaryWriter.pbtxt index 55d5207beec293..24730ade1494c6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorSetSummaryWriter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorSetSummaryWriter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatsAggregatorSetSummaryWriter" input_arg { name: "stats_aggregator" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorSummary.pbtxt index 92d1f2ba507923..a0702a11168ff4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StatsAggregatorSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StatsAggregatorSummary" input_arg { name: "iterator" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StochasticCastToInt.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StochasticCastToInt.pbtxt index 66896c5d662940..930525b4364525 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StochasticCastToInt.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StochasticCastToInt.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StochasticCastToInt" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StopGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StopGradient.pbtxt index 06500717b6034a..26f7c677ab8e8a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StopGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StopGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StopGradient" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StoreMinibatchStatisticsInFdo.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StoreMinibatchStatisticsInFdo.pbtxt index 2250ba0eee369c..22766cb4409917 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StoreMinibatchStatisticsInFdo.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StoreMinibatchStatisticsInFdo.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StoreMinibatchStatisticsInFdo" input_arg { name: "program_key" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StridedSlice.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StridedSlice.pbtxt index 60186d9449d593..2c60bcb73c757f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StridedSlice.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StridedSlice.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StridedSlice" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StridedSliceAssign.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StridedSliceAssign.pbtxt index ba3eaed57e270e..8393dc7272c59c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StridedSliceAssign.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StridedSliceAssign.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StridedSliceAssign" input_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StridedSliceGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StridedSliceGrad.pbtxt index 92bad979c0f579..14f6a464020a5a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StridedSliceGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StridedSliceGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StridedSliceGrad" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringFormat.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringFormat.pbtxt index 23c48e47c2761d..bea32908608e6f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StringFormat.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StringFormat.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StringFormat" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringJoin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringJoin.pbtxt index a45262ee65ed29..790cb7b5b6c8d6 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/StringJoin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StringJoin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StringJoin" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringLength.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringLength.pbtxt index 169adeecbed33f..5bdf993f907a2a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StringLength.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StringLength.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StringLength" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringLower.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringLower.pbtxt index 5f7dbf3b2bb283..1c886146d0560d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StringLower.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StringLower.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StringLower" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringNGrams.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringNGrams.pbtxt index 4281b96aee5007..025fc052819bf2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StringNGrams.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StringNGrams.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StringNGrams" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringSplit.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringSplit.pbtxt index 1832d334e79837..35e8594235e170 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StringSplit.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StringSplit.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StringSplit" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringSplitV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringSplitV2.pbtxt index 03ba25fd70168b..fbdf8e06f372c1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StringSplitV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StringSplitV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StringSplitV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringStrip.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringStrip.pbtxt index 153bd63f4e2313..3fff999e93789b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StringStrip.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StringStrip.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StringStrip" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucket.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucket.pbtxt index 6cbdd4c7bf8c28..7147a40a12d5af 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucket.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucket.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StringToHashBucket" input_arg { name: "string_tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucketFast.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucketFast.pbtxt index a07a00a41eecee..8ef1227faae9b7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucketFast.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucketFast.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "StringToHashBucketFast" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucketStrong.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucketStrong.pbtxt
index de121b287b3aa9..2dbd9920711fe4 100644
--- a/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucketStrong.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history_v2/StringToHashBucketStrong.pbtxt
@@ -1,4 +1,4 @@
-op {
+op {
   name: "StringToHashBucketStrong"
   input_arg {
     name: "input"
diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringToNumber.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringToNumber.pbtxt
index 833aaa61533c1f..fb09d67516e7fd 100644
--- a/tensorflow/core/ops/compat/ops_history_v2/StringToNumber.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history_v2/StringToNumber.pbtxt
@@ -1,4 +1,4 @@
-op {
+op {
   name: "StringToNumber"
   input_arg {
     name: "string_tensor"
@@ -48,3 +48,31 @@ op {
     }
   }
 }
+op {
+  name: "StringToNumber"
+  input_arg {
+    name: "string_tensor"
+    type: DT_STRING
+  }
+  output_arg {
+    name: "output"
+    type_attr: "out_type"
+  }
+  attr {
+    name: "out_type"
+    type: "type"
+    default_value {
+      type: DT_FLOAT
+    }
+    allowed_values {
+      list {
+        type: DT_FLOAT
+        type: DT_DOUBLE
+        type: DT_INT32
+        type: DT_INT64
+        type: DT_UINT32
+        type: DT_UINT64
+      }
+    }
+  }
+}
diff --git a/tensorflow/core/ops/compat/ops_history_v2/StringUpper.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/StringUpper.pbtxt
index 69c6133ad0501b..8df4881554cefe 100644
--- a/tensorflow/core/ops/compat/ops_history_v2/StringUpper.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history_v2/StringUpper.pbtxt
@@ -1,4 +1,4 @@
-op {
+op {
   name: "StringUpper"
   input_arg {
     name: "input"
diff --git a/tensorflow/core/ops/compat/ops_history_v2/Sub.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Sub.pbtxt
index 44761cb1a3bf95..4d89817f561874 100644
--- a/tensorflow/core/ops/compat/ops_history_v2/Sub.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history_v2/Sub.pbtxt
@@ -1,4 +1,4 @@
-op {
+op {
   name: "Sub"
   input_arg {
     name: "x"
diff --git a/tensorflow/core/ops/compat/ops_history_v2/Substr.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Substr.pbtxt
index ebfc6279bbe635..a5c1d2c0ae0aa8 100644
--- a/tensorflow/core/ops/compat/ops_history_v2/Substr.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history_v2/Substr.pbtxt
@@ -1,4 +1,4 @@
-op {
+op {
   name: "Substr"
   input_arg {
     name: "input"
diff --git a/tensorflow/core/ops/compat/ops_history_v2/Sum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Sum.pbtxt
index 4a71be5f59cfe9..fb7ce8600c4ecd 100644
--- a/tensorflow/core/ops/compat/ops_history_v2/Sum.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history_v2/Sum.pbtxt
@@ -1,4 +1,4 @@
-op {
+op {
   name: "Sum"
   input_arg {
     name: "input"
diff --git a/tensorflow/core/ops/compat/ops_history_v2/SummaryWriter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SummaryWriter.pbtxt
index 199ded68ff3bbf..a6fd9170f2a121 100644
--- a/tensorflow/core/ops/compat/ops_history_v2/SummaryWriter.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history_v2/SummaryWriter.pbtxt
@@ -1,4 +1,4 @@
-op {
+op {
   name: "SummaryWriter"
   output_arg {
     name: "writer"
diff --git a/tensorflow/core/ops/compat/ops_history_v2/Svd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Svd.pbtxt
index 3750a39144739e..48003906cc05ed 100644
--- a/tensorflow/core/ops/compat/ops_history_v2/Svd.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history_v2/Svd.pbtxt
@@ -1,4 +1,4 @@
-op {
+op {
   name: "Svd"
   input_arg {
     name: "input"
diff --git a/tensorflow/core/ops/compat/ops_history_v2/Switch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Switch.pbtxt
index 2ff607548010d7..0856f3459b3c02 100644
---
a/tensorflow/core/ops/compat/ops_history_v2/Switch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Switch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Switch" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SymbolicGradient.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SymbolicGradient.pbtxt index 5193954ea7eebd..aae5457863e176 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SymbolicGradient.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SymbolicGradient.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SymbolicGradient" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/SyncDevice.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/SyncDevice.pbtxt index cc6bf6b6f0c0b0..e55c5f4ade1d4b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/SyncDevice.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/SyncDevice.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "SyncDevice" is_stateful: true } diff --git a/tensorflow/core/ops/compat/ops_history_v2/TFRecordDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TFRecordDataset.pbtxt index b68eea8fbd264c..6caa343ebc05b8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TFRecordDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TFRecordDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TFRecordDataset" input_arg { name: "filenames" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TFRecordDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TFRecordDatasetV2.pbtxt index af944b699a0e9d..d3b89247493e91 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TFRecordDatasetV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TFRecordDatasetV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TFRecordDatasetV2" input_arg { name: "filenames" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TFRecordReader.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TFRecordReader.pbtxt index 0f223c57ec5ed3..684c21ea45e8f5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TFRecordReader.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TFRecordReader.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TFRecordReader" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TFRecordReaderV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TFRecordReaderV2.pbtxt index 0f9f02ce01d876..bcdb4764d378da 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TFRecordReaderV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TFRecordReaderV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TFRecordReaderV2" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUAnnotateTensorsWithDynamicShape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUAnnotateTensorsWithDynamicShape.pbtxt index 09d484e3194e76..eb1f07856a9ef6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUAnnotateTensorsWithDynamicShape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUAnnotateTensorsWithDynamicShape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUAnnotateTensorsWithDynamicShape" input_arg { name: "tensors" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUCompilationResult.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUCompilationResult.pbtxt index bdaf1f9e51a8c9..04a95cc089fd4e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUCompilationResult.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUCompilationResult.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUCompilationResult" output_arg { name: 
"output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUCompile.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUCompile.pbtxt index 7328f9214e3be6..be95091c809227 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUCompile.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUCompile.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUCompile" input_arg { name: "dynamic_shapes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUCompileSucceededAssert.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUCompileSucceededAssert.pbtxt index a39d0e72843b10..bc1b3c153f10ec 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUCompileSucceededAssert.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUCompileSucceededAssert.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUCompileSucceededAssert" input_arg { name: "compilation_status" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUCopyWithDynamicShape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUCopyWithDynamicShape.pbtxt index 8b897ff34cca06..1e8386d91a5760 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUCopyWithDynamicShape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUCopyWithDynamicShape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUCopyWithDynamicShape" input_arg { name: "tensors" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUEmbeddingActivations.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUEmbeddingActivations.pbtxt index 0bd460f3a0bf50..3975077297a6fa 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUEmbeddingActivations.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUEmbeddingActivations.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUEmbeddingActivations" input_arg { name: "embedding_variable" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUExecute.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUExecute.pbtxt index a231036be50e8b..97a2c2a1f6673d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUExecute.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUExecute.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUExecute" input_arg { name: "args" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUExecuteAndUpdateVariables.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUExecuteAndUpdateVariables.pbtxt index 2d41c28beff465..5a611f9202d83d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUExecuteAndUpdateVariables.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUExecuteAndUpdateVariables.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUExecuteAndUpdateVariables" input_arg { name: "args" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUOrdinalSelector.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUOrdinalSelector.pbtxt index de910326bd6a79..3fb272504068ce 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUOrdinalSelector.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUOrdinalSelector.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUOrdinalSelector" output_arg { name: "device_ordinals" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedCall.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedCall.pbtxt index cc0652a3790b31..1ec9cb3e43400f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedCall.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedCall.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUPartitionedCall" input_arg { name: "args" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedInput.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedInput.pbtxt index b5eca4ed94d216..aab0574d99eb95 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUPartitionedInput" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedInputV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedInputV2.pbtxt index 52ec50cc1bf7bd..fe0ef345a1c628 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedInputV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedInputV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUPartitionedInputV2" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedOutput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedOutput.pbtxt index ad5122c3f1b732..38a85e319644ac 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedOutput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedOutput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUPartitionedOutput" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedOutputV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedOutputV2.pbtxt index 83b7375fa2f1bf..3e7a6039ad8ca0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedOutputV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUPartitionedOutputV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUPartitionedOutputV2" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUReplicateMetadata.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUReplicateMetadata.pbtxt index 9ea81b7f929a1f..9742ad0d8d40c9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUReplicateMetadata.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUReplicateMetadata.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUReplicateMetadata" attr { name: "num_replicas" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUReplicatedInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUReplicatedInput.pbtxt index f5a8ddcf284366..b549b570c13777 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUReplicatedInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUReplicatedInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUReplicatedInput" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUReplicatedOutput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUReplicatedOutput.pbtxt index f7e9600cf4fb99..70b7d0ae71aadc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUReplicatedOutput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUReplicatedOutput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUReplicatedOutput" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPUReshardVariables.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPUReshardVariables.pbtxt index 0bc7b4611fc7e8..fecd05d06eb1b9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TPUReshardVariables.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPUReshardVariables.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPUReshardVariables" input_arg { name: "vars" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TPURoundRobin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TPURoundRobin.pbtxt index 1405bc8cd6b167..7630e0cac746a0 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/TPURoundRobin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TPURoundRobin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TPURoundRobin" output_arg { name: "device_ordinal" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TakeDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TakeDataset.pbtxt index 4d9da96372ae09..8ced9c67054cc1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TakeDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TakeDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TakeDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TakeManySparseFromTensorsMap.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TakeManySparseFromTensorsMap.pbtxt index 7b852a26ddec81..0e3ca630eb178e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TakeManySparseFromTensorsMap.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TakeManySparseFromTensorsMap.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TakeManySparseFromTensorsMap" input_arg { name: "sparse_handles" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TakeWhileDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TakeWhileDataset.pbtxt index 7586b070a73be4..bfde2664966ef2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TakeWhileDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TakeWhileDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TakeWhileDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Tan.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Tan.pbtxt index 45442d6d712d71..a78f07b8d21382 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Tan.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Tan.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Tan" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Tanh.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Tanh.pbtxt index 9b7ccfe0bc9503..1672b0dc825c79 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Tanh.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Tanh.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Tanh" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TanhGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TanhGrad.pbtxt index 41a827121b8c56..67d28f8ad7e0b4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TanhGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TanhGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TanhGrad" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TemporaryVariable.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TemporaryVariable.pbtxt index d5c19d9f1ef34d..191354ec959700 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TemporaryVariable.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TemporaryVariable.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TemporaryVariable" output_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArray.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArray.pbtxt index 67aa5f61327ac7..74b1a54976cc8b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArray.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArray.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArray" input_arg { name: "size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayClose.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayClose.pbtxt index 44b06bcbf10e95..63c0100942005b 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayClose.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayClose.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayClose" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayCloseV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayCloseV2.pbtxt index 5298e3d85742b8..b0fb5804f1a2cf 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayCloseV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayCloseV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayCloseV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayCloseV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayCloseV3.pbtxt index 63af4407f62ae6..c5d1c2b1f244ce 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayCloseV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayCloseV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayCloseV3" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcat.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcat.pbtxt index 1111e79677e141..e2c59abd687402 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcat.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcat.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayConcat" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcatV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcatV2.pbtxt index b08f04af049c80..72376bd561c910 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcatV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcatV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayConcatV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcatV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcatV3.pbtxt index 70a7939d25c9d2..91e575ca87f6b3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcatV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayConcatV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayConcatV3" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGather.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGather.pbtxt index 30b54dbce7b8b4..a8ded38550bc8b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGather.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGather.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayGather" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGatherV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGatherV2.pbtxt index 3025ec43a0da9c..f72968388437f6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGatherV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGatherV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayGatherV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGatherV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGatherV3.pbtxt index bf80504e78a1fb..c87538a40d26ab 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGatherV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGatherV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: 
"TensorArrayGatherV3" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGrad.pbtxt index 326cb594c3b5af..422154510dbcdf 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayGrad" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradV2.pbtxt index 092dd8435055b7..d989c4071435ee 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayGradV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradV3.pbtxt index 749e282ae92a2d..53e20429ec0e6f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayGradV3" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradWithShape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradWithShape.pbtxt index 100c3e7c78a4ef..1ce739062eb4c2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradWithShape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayGradWithShape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayGradWithShape" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayPack.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayPack.pbtxt index 7b4ca8e9f4d39e..f608e453cee31b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayPack.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayPack.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayPack" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayRead.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayRead.pbtxt index c3398036b5e4b5..62660bec758965 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayRead.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayRead.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayRead" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayReadV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayReadV2.pbtxt index 5620c0ade70a6c..cd0a2a32a8c06b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayReadV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayReadV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayReadV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayReadV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayReadV3.pbtxt index bb53ce3649d56c..59e66fc84d5cdb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayReadV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayReadV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayReadV3" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatter.pbtxt index 
37c69a9b398e2e..b2017163f5c072 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayScatter" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatterV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatterV2.pbtxt index 798222d0d00d64..1eacf2d9acfa3c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatterV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatterV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayScatterV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatterV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatterV3.pbtxt index c9ecffbc9692c1..5053ed60b1130e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatterV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayScatterV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayScatterV3" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySize.pbtxt index 690f2118b6269f..7f6ce9510a0bff 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArraySize" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySizeV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySizeV2.pbtxt index bf446335854293..8ee9eda30bd21f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySizeV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySizeV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArraySizeV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySizeV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySizeV3.pbtxt index c156e6af69ea9b..8932b0dcf2ddf7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySizeV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySizeV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArraySizeV3" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplit.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplit.pbtxt index 4a693b29ff9cba..06bf8bfc3595fd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplit.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplit.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArraySplit" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplitV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplitV2.pbtxt index fa9e02adb9326b..b45ea7a6108a66 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplitV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplitV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArraySplitV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplitV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplitV3.pbtxt index 2eed16078de27e..c072c0c65fc008 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplitV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArraySplitV3.pbtxt @@ -1,4 +1,4 @@ 
-op { +op { name: "TensorArraySplitV3" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayUnpack.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayUnpack.pbtxt index 6fd145fbf55fac..81e5abec891b9b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayUnpack.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayUnpack.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayUnpack" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayV2.pbtxt index 3c01113a63e8c4..1293e1999c2031 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayV2" input_arg { name: "size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayV3.pbtxt index ef70cea090839b..906e407de181e0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayV3" input_arg { name: "size" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWrite.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWrite.pbtxt index af5ad923f1d8c1..8f1a94c36b3878 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWrite.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWrite.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayWrite" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWriteV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWriteV2.pbtxt index ba33173b1dc971..fa0c1a679f7dfa 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWriteV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWriteV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayWriteV2" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWriteV3.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWriteV3.pbtxt index 44670bec744cf5..45327d42b1be1b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWriteV3.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorArrayWriteV3.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorArrayWriteV3" input_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorDataset.pbtxt index a70bea1c6975d5..9e71deef2c597c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorDataset" input_arg { name: "components" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListConcat.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListConcat.pbtxt index 8b715d4864011e..010be2e120b46e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListConcat.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListConcat.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListConcat" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListConcatLists.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListConcatLists.pbtxt index e532be3918c367..57dd05a90feb7d 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/TensorListConcatLists.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListConcatLists.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListConcatLists" input_arg { name: "input_a" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListConcatV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListConcatV2.pbtxt index 6051430fd552bf..0bb9546d155d29 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListConcatV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListConcatV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListConcatV2" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListElementShape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListElementShape.pbtxt index e4143fdbe9dd06..26b982f6cfd9c3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListElementShape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListElementShape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListElementShape" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListFromTensor.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListFromTensor.pbtxt index f3111b9756db4e..c2be2938c8607e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListFromTensor.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListFromTensor.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListFromTensor" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListGather.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListGather.pbtxt index 271fc2db2c0161..43b4773a4f7e0f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListGather.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListGather.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListGather" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListGetItem.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListGetItem.pbtxt index ff087f92a0721a..fa124bc94971fe 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListGetItem.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListGetItem.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListGetItem" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListLength.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListLength.pbtxt index 8b95320f200740..b4ea660dca13de 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListLength.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListLength.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListLength" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListPopBack.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListPopBack.pbtxt index 61dd1111cb64f5..35aa68e07584cc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListPopBack.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListPopBack.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListPopBack" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListPushBack.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListPushBack.pbtxt index 1ae20365fb9bab..8175cfe350dab0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListPushBack.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/TensorListPushBack.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListPushBack" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListPushBackBatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListPushBackBatch.pbtxt index 6e805caed1bcc2..29b878e527a4d5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListPushBackBatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListPushBackBatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListPushBackBatch" input_arg { name: "input_handles" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListReserve.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListReserve.pbtxt index bef49b63477ebe..98ade8cb4a9fd0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListReserve.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListReserve.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListReserve" input_arg { name: "element_shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListResize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListResize.pbtxt index 50e070018e2a0b..b322d89d47aa68 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListResize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListResize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListResize" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListScatter.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListScatter.pbtxt index 31fa2452bb11d9..daa2f4130ab06b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListScatter.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListScatter.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListScatter" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListScatterIntoExistingList.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListScatterIntoExistingList.pbtxt index 311157650998f6..4427bab8a358c2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListScatterIntoExistingList.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListScatterIntoExistingList.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListScatterIntoExistingList" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListScatterV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListScatterV2.pbtxt index 29cf4aba78680f..de588984614839 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListScatterV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListScatterV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListScatterV2" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListSetItem.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListSetItem.pbtxt index d16b98d59bdc54..e2510ca98e0b8f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListSetItem.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListSetItem.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListSetItem" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListSplit.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListSplit.pbtxt index 2ed8f207fed4c3..ff83247addf89b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListSplit.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListSplit.pbtxt @@ -1,4 
+1,4 @@ -op { +op { name: "TensorListSplit" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorListStack.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorListStack.pbtxt index 8623e34a934845..5a8e7bcd81b9ef 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorListStack.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorListStack.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorListStack" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorMapErase.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorMapErase.pbtxt index 009fd49ee6288a..854e7311eab331 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorMapErase.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorMapErase.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorMapErase" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorMapHasKey.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorMapHasKey.pbtxt index e2856e3ecaea56..a095c36d7c26f5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorMapHasKey.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorMapHasKey.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorMapHasKey" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorMapInsert.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorMapInsert.pbtxt index 492d3538de0ed5..10061ea1cde6dc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorMapInsert.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorMapInsert.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorMapInsert" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorMapLookup.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorMapLookup.pbtxt index 9f1097226fc7b3..b48fda8ac4623f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorMapLookup.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorMapLookup.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorMapLookup" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorMapSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorMapSize.pbtxt index a94f2c515ece36..dd8ade84414f56 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorMapSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorMapSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorMapSize" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorMapStackKeys.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorMapStackKeys.pbtxt index 10aef43aca1343..c3befaa320a385 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorMapStackKeys.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorMapStackKeys.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorMapStackKeys" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorScatterAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorScatterAdd.pbtxt index 71f0e9fb625334..5fb5b8cb0dd693 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorScatterAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorScatterAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorScatterAdd" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorScatterMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorScatterMax.pbtxt index 3abba955f62158..84a05c9bd5dba9 100644 
--- a/tensorflow/core/ops/compat/ops_history_v2/TensorScatterMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorScatterMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorScatterMax" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorScatterMin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorScatterMin.pbtxt index 047619805f2aff..d1ae6117921ee0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorScatterMin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorScatterMin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorScatterMin" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorScatterSub.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorScatterSub.pbtxt index d66d823ef317c8..81920523ae9bbc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorScatterSub.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorScatterSub.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorScatterSub" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorScatterUpdate.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorScatterUpdate.pbtxt index 4bc0747881f943..1e8281cc247958 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorScatterUpdate.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorScatterUpdate.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorScatterUpdate" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorSliceDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorSliceDataset.pbtxt index 7be6e03ca4a19c..3810a6b4023657 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorSliceDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorSliceDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorSliceDataset" input_arg { name: "components" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorStridedSliceUpdate.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorStridedSliceUpdate.pbtxt index 67dae5ab4dd346..3854eeed137057 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorStridedSliceUpdate.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorStridedSliceUpdate.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorStridedSliceUpdate" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorSummary.pbtxt index 5486c2ef84aaed..bf4114aeef398c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorSummary" input_arg { name: "tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TensorSummaryV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TensorSummaryV2.pbtxt index 107c53939f3b29..39092b078161f0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TensorSummaryV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TensorSummaryV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TensorSummaryV2" input_arg { name: "tag" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TextLineDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TextLineDataset.pbtxt index 5f539c525b0048..c51a34124f2d1e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TextLineDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TextLineDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TextLineDataset" input_arg { name: "filenames" 
diff --git a/tensorflow/core/ops/compat/ops_history_v2/TextLineReader.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TextLineReader.pbtxt index 2e0924bb51786e..baf1ef10d91047 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TextLineReader.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TextLineReader.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TextLineReader" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TextLineReaderV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TextLineReaderV2.pbtxt index d30d08b5fbf150..c669951acdf6b6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TextLineReaderV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TextLineReaderV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TextLineReaderV2" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ThreadPoolDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ThreadPoolDataset.pbtxt index b8c817fc13876a..8e185af579f2be 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ThreadPoolDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ThreadPoolDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ThreadPoolDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ThreadPoolHandle.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ThreadPoolHandle.pbtxt index 4fac8fb83b79f1..e2518b1439d732 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ThreadPoolHandle.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ThreadPoolHandle.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ThreadPoolHandle" output_arg { name: "handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ThreadUnsafeUnigramCandidateSampler.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ThreadUnsafeUnigramCandidateSampler.pbtxt index 0e99e93edcc732..89106aab220583 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ThreadUnsafeUnigramCandidateSampler.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ThreadUnsafeUnigramCandidateSampler.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ThreadUnsafeUnigramCandidateSampler" input_arg { name: "true_classes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Tile.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Tile.pbtxt index f3a139e2265abd..67de1e5201698c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Tile.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Tile.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Tile" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TileGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TileGrad.pbtxt index f068a74b1c3d3d..f710e1c470f254 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TileGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TileGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TileGrad" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Timestamp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Timestamp.pbtxt index 18f5f4d2d23f03..6e51504d17653e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Timestamp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Timestamp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Timestamp" output_arg { name: "ts" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ToBool.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ToBool.pbtxt index 2903fe7f0b2e7f..6e02fdbb52cdeb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ToBool.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/ToBool.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ToBool" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TopK.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TopK.pbtxt index 8ad9a3c23bb9c7..71c98b7fd8120e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TopK.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TopK.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TopK" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TopKUnique.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TopKUnique.pbtxt index 23b15af2aef16e..12463385bcc816 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TopKUnique.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TopKUnique.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TopKUnique" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TopKV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TopKV2.pbtxt index 5da8c479b9a8b3..22908f661e62f9 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TopKV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TopKV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TopKV2" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TopKWithUnique.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TopKWithUnique.pbtxt index cfb4e9da928507..5e3216fa554877 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TopKWithUnique.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TopKWithUnique.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TopKWithUnique" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TpuHandleToProtoKey.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TpuHandleToProtoKey.pbtxt index 3d2c41d36b4d15..1f8d36887b9733 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TpuHandleToProtoKey.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TpuHandleToProtoKey.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TpuHandleToProtoKey" input_arg { name: "uid" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Transpose.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Transpose.pbtxt index e83ec349f19b72..fa4fb6d58937c4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Transpose.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Transpose.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Transpose" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TridiagonalMatMul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TridiagonalMatMul.pbtxt index 98f70443f67a82..117d68b48d29b6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TridiagonalMatMul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TridiagonalMatMul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TridiagonalMatMul" input_arg { name: "superdiag" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TridiagonalSolve.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TridiagonalSolve.pbtxt index f86be1a7508c37..d824c1cb76c35e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TridiagonalSolve.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TridiagonalSolve.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TridiagonalSolve" input_arg { name: "diagonals" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TruncateDiv.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TruncateDiv.pbtxt index 74a4db11dc5efc..0b6c414f616163 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TruncateDiv.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TruncateDiv.pbtxt 
@@ -1,4 +1,4 @@ -op { +op { name: "TruncateDiv" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TruncateMod.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TruncateMod.pbtxt index 72517db5294d89..70ce81b35c4209 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TruncateMod.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TruncateMod.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TruncateMod" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/TruncatedNormal.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/TruncatedNormal.pbtxt index c066b3283ec0ad..018d657985d5e8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/TruncatedNormal.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/TruncatedNormal.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "TruncatedNormal" input_arg { name: "shape" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Unbatch.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Unbatch.pbtxt index 624522ee54ced0..3934b1823ff052 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Unbatch.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Unbatch.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Unbatch" input_arg { name: "batched_tensor" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnbatchDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnbatchDataset.pbtxt index fa3075b92f41ee..84479c117206d7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnbatchDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnbatchDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnbatchDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnbatchGrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnbatchGrad.pbtxt index f2619637143ee8..97240f0be53a0f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnbatchGrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnbatchGrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnbatchGrad" input_arg { name: "original_input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UncompressElement.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UncompressElement.pbtxt index 04674945d82c97..68406e0e4bc755 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UncompressElement.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UncompressElement.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UncompressElement" input_arg { name: "compressed" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnicodeDecode.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnicodeDecode.pbtxt index a8aac23ae733e7..fa036b31ef6c38 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnicodeDecode.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnicodeDecode.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnicodeDecode" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnicodeDecodeWithOffsets.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnicodeDecodeWithOffsets.pbtxt index 05a35cc1ad2c7f..29d274738da829 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnicodeDecodeWithOffsets.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnicodeDecodeWithOffsets.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnicodeDecodeWithOffsets" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnicodeEncode.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnicodeEncode.pbtxt index de0b916b6ab2a2..31a7a5b838820d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnicodeEncode.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/UnicodeEncode.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnicodeEncode" input_arg { name: "input_values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnicodeScript.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnicodeScript.pbtxt index b9d7590e7c2ff2..60877b544480fe 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnicodeScript.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnicodeScript.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnicodeScript" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnicodeTranscode.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnicodeTranscode.pbtxt index 494d7a9d6d8373..5cab73782ce8ec 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnicodeTranscode.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnicodeTranscode.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnicodeTranscode" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniformCandidateSampler.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniformCandidateSampler.pbtxt index affff4ad02d2dc..bea963f908ee14 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniformCandidateSampler.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniformCandidateSampler.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniformCandidateSampler" input_arg { name: "true_classes" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniformDequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniformDequantize.pbtxt index 68557e82c563e1..7653370635d18c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniformDequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniformDequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniformDequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantize.pbtxt index 3e7d42c86198c8..900da8e5be53c3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniformQuantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedAdd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedAdd.pbtxt index 480e70a6693739..1adac5ae59e790 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedAdd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedAdd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniformQuantizedAdd" input_arg { name: "lhs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedClipByValue.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedClipByValue.pbtxt index a9d0a79fded26e..8ab01d6c42d9a6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedClipByValue.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedClipByValue.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniformQuantizedClipByValue" input_arg { name: "operand" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedConvolution.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedConvolution.pbtxt index 68c4746c5210ae..4ebbe6c80690e8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedConvolution.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedConvolution.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniformQuantizedConvolution" 
input_arg { name: "lhs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedConvolutionHybrid.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedConvolutionHybrid.pbtxt index 9a9f8fc085fccb..23096f391d9b6e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedConvolutionHybrid.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedConvolutionHybrid.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniformQuantizedConvolutionHybrid" input_arg { name: "lhs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedDot.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedDot.pbtxt index 9b3030bb3bd8a6..159dabb9798621 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedDot.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedDot.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniformQuantizedDot" input_arg { name: "lhs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedDotHybrid.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedDotHybrid.pbtxt index 38c8ed55f5978e..2cabf91667e386 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedDotHybrid.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniformQuantizedDotHybrid.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniformQuantizedDotHybrid" input_arg { name: "lhs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniformRequantize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniformRequantize.pbtxt index bfc3d47945b686..af2d7387c5d695 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniformRequantize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniformRequantize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniformRequantize" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Unique.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Unique.pbtxt index 9d91d9ef4c6164..be389ba1482be8 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Unique.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Unique.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Unique" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniqueDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniqueDataset.pbtxt index 58fc7e07a35822..281ba7c1bd0619 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniqueDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniqueDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniqueDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniqueV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniqueV2.pbtxt index e76969551065c0..83113e14232fb2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniqueV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniqueV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniqueV2" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniqueWithCounts.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniqueWithCounts.pbtxt index 4d5682561cf926..c386059943a70e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UniqueWithCounts.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniqueWithCounts.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniqueWithCounts" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UniqueWithCountsV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UniqueWithCountsV2.pbtxt index d54bba74d97327..85a12b70007320 100644 --- 
a/tensorflow/core/ops/compat/ops_history_v2/UniqueWithCountsV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UniqueWithCountsV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UniqueWithCountsV2" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Unpack.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Unpack.pbtxt index 6f24385e25bce6..cc5fd918d4c694 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Unpack.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Unpack.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Unpack" input_arg { name: "value" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnravelIndex.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnravelIndex.pbtxt index 36e66f2d2b0f4b..df2c2bc8469451 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnravelIndex.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnravelIndex.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnravelIndex" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentJoin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentJoin.pbtxt index 8c95acc7ab9dba..dcbb91bc2f13c4 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentJoin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentJoin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnsortedSegmentJoin" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentMax.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentMax.pbtxt index fc9ca18d6cfeed..ee8578f289bdc0 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentMax.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentMax.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnsortedSegmentMax" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentMin.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentMin.pbtxt index 33e3635173d589..6a8e5ba6d1fbe6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentMin.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentMin.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnsortedSegmentMin" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentProd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentProd.pbtxt index e1543a33441e81..d100cde127e9f1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentProd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentProd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnsortedSegmentProd" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentSum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentSum.pbtxt index ff0bf2d7a8cf8a..28ddbd3bdec499 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentSum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnsortedSegmentSum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnsortedSegmentSum" input_arg { name: "data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Unstage.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Unstage.pbtxt index af51a8ce0b8ca6..4bcfd02758ced6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Unstage.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Unstage.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Unstage" output_arg { name: "values" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UnwrapDatasetVariant.pbtxt 
b/tensorflow/core/ops/compat/ops_history_v2/UnwrapDatasetVariant.pbtxt index fc01d23c863703..10e23a97750a70 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UnwrapDatasetVariant.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UnwrapDatasetVariant.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UnwrapDatasetVariant" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/UpperBound.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/UpperBound.pbtxt index 5cfd0d536c5238..d1b3fa060c6942 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/UpperBound.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/UpperBound.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "UpperBound" input_arg { name: "sorted_inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/VarHandleOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/VarHandleOp.pbtxt index acca5ff60992e7..80bc633b5ac613 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/VarHandleOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/VarHandleOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "VarHandleOp" output_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/VarIsInitializedOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/VarIsInitializedOp.pbtxt index 975983e81af8ff..395360158262b3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/VarIsInitializedOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/VarIsInitializedOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "VarIsInitializedOp" input_arg { name: "resource" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Variable.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Variable.pbtxt index 7ad65f1fe6a3f6..943c24def5944d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Variable.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Variable.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Variable" output_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/VariableShape.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/VariableShape.pbtxt index e0bb7d05dafe25..570b4f241aaa95 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/VariableShape.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/VariableShape.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "VariableShape" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/VariableV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/VariableV2.pbtxt index dabd46dcfb467c..c27112f15887b1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/VariableV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/VariableV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "VariableV2" output_arg { name: "ref" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WeightedFlatMapDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WeightedFlatMapDataset.pbtxt index 5c50229e4c5b07..98c853bb2f3e64 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WeightedFlatMapDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WeightedFlatMapDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WeightedFlatMapDataset" input_arg { name: "input_datasets" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Where.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Where.pbtxt index b23dca17037197..8e64cd2419e2c1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Where.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Where.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Where" input_arg { name: "input" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/While.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/While.pbtxt index 8e609867f8d94f..807461b00984bd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/While.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/While.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "While" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WholeFileReader.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WholeFileReader.pbtxt index b1513138650504..729d76503e53dd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WholeFileReader.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WholeFileReader.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WholeFileReader" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WholeFileReaderV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WholeFileReaderV2.pbtxt index f451cf41c57bc3..2430494342d709 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WholeFileReaderV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WholeFileReaderV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WholeFileReaderV2" output_arg { name: "reader_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WindowDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WindowDataset.pbtxt index a5fe3aabbfe45e..43784faad6dc62 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WindowDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WindowDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WindowDataset" input_arg { name: "input_dataset" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WindowOp.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WindowOp.pbtxt index 60e080a0472b0e..336a13805eaf6a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WindowOp.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WindowOp.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WindowOp" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WorkerHeartbeat.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WorkerHeartbeat.pbtxt index bfadb835956750..ae5c7b8caaad6a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WorkerHeartbeat.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WorkerHeartbeat.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WorkerHeartbeat" input_arg { name: "request" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WrapDatasetVariant.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WrapDatasetVariant.pbtxt index 247262ec65ce5d..0b1e4363bd20e6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WrapDatasetVariant.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WrapDatasetVariant.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WrapDatasetVariant" input_arg { name: "input_handle" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WriteAudioSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WriteAudioSummary.pbtxt index 6ec7f394c44bb2..8cc81eba8ff3a2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WriteAudioSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WriteAudioSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WriteAudioSummary" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WriteFile.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WriteFile.pbtxt index 55a4c60b10173b..6a15b39873d560 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WriteFile.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WriteFile.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WriteFile" 
input_arg { name: "filename" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WriteGraphSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WriteGraphSummary.pbtxt index 7851117301a1b3..2957e224f59514 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WriteGraphSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WriteGraphSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WriteGraphSummary" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WriteHistogramSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WriteHistogramSummary.pbtxt index 869b72fc133101..492d573056823b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WriteHistogramSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WriteHistogramSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WriteHistogramSummary" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WriteImageSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WriteImageSummary.pbtxt index 45e3243c6bcf40..1cfc5ca69a7f40 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WriteImageSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WriteImageSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WriteImageSummary" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WriteRawProtoSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WriteRawProtoSummary.pbtxt index 6c571b9f2c7f7a..82ac51a137894b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WriteRawProtoSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WriteRawProtoSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WriteRawProtoSummary" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WriteScalarSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WriteScalarSummary.pbtxt index e40411aabdc256..0f359a85dce91b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WriteScalarSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WriteScalarSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WriteScalarSummary" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/WriteSummary.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/WriteSummary.pbtxt index f6f887199db2e1..a641ece08df095 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/WriteSummary.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/WriteSummary.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "WriteSummary" input_arg { name: "writer" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Xdivy.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Xdivy.pbtxt index 6536322552738d..898987f947cd3a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Xdivy.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Xdivy.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Xdivy" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaConcatND.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaConcatND.pbtxt index 7d320b8f5c4544..c571497bcd5535 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaConcatND.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaConcatND.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaConcatND" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaHostCompute.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaHostCompute.pbtxt index 787e744ed8350a..9675bda59a86dd 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaHostCompute.pbtxt +++ 
b/tensorflow/core/ops/compat/ops_history_v2/XlaHostCompute.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaHostCompute" input_arg { name: "inputs" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaRecvFromHost.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaRecvFromHost.pbtxt index 10c8f79678c334..d3760ea79b1272 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaRecvFromHost.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaRecvFromHost.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaRecvFromHost" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingActivations.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingActivations.pbtxt index 208eea2857258f..b624b26a628bc2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingActivations.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingActivations.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaRecvTPUEmbeddingActivations" input_arg { name: "deduplication_data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingActivationsV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingActivationsV2.pbtxt index c5abbb2d5f1a4e..2e8fb4d4f2530c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingActivationsV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingActivationsV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaRecvTPUEmbeddingActivationsV2" input_arg { name: "deduplication_data" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingDeduplicationData.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingDeduplicationData.pbtxt index c931212779893b..3c3b92f9d7bff7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingDeduplicationData.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingDeduplicationData.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaRecvTPUEmbeddingDeduplicationData" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingDeduplicationDataV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingDeduplicationDataV2.pbtxt index 71632c6d871452..d97710b91e46fb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingDeduplicationDataV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaRecvTPUEmbeddingDeduplicationDataV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaRecvTPUEmbeddingDeduplicationDataV2" output_arg { name: "output" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSendTPUEmbeddingGradients.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSendTPUEmbeddingGradients.pbtxt index 6da5fb2f5395cb..77f6547229554f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSendTPUEmbeddingGradients.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSendTPUEmbeddingGradients.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSendTPUEmbeddingGradients" input_arg { name: "gradients" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSendTPUEmbeddingGradientsV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSendTPUEmbeddingGradientsV2.pbtxt index f52b83abc14719..b416d0ad1a8f0c 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSendTPUEmbeddingGradientsV2.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSendTPUEmbeddingGradientsV2.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSendTPUEmbeddingGradientsV2" input_arg { name: "gradients" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/XlaSendToHost.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSendToHost.pbtxt index 90cc16e20a2e95..f2dfeaf444491e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSendToHost.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSendToHost.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSendToHost" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdagrad.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdagrad.pbtxt index 9cf626b404b1e7..bc20baf287c8b1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdagrad.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdagrad.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseCoreAdagrad" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdagradMomentum.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdagradMomentum.pbtxt index b64460485d5ac9..5e2b17ab4c9238 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdagradMomentum.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdagradMomentum.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseCoreAdagradMomentum" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdam.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdam.pbtxt index 38af8afcc1d10d..625c164bb20ed3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdam.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreAdam.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseCoreAdam" input_arg { name: "embedding_table" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreFtrl.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreFtrl.pbtxt index afbf9e024d7041..b65b707befee31 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreFtrl.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreFtrl.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseCoreFtrl" input_arg { name: "embedding_table" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreSgd.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreSgd.pbtxt index 7f507c7d722106..677ce14ddf039d 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreSgd.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseCoreSgd.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseCoreSgd" input_arg { name: "indices" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmul.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmul.pbtxt index 5ecf0c2472748a..90aa2cf22849b3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmul.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmul.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmul" input_arg { name: "row_ids" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradAndCsrInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradAndCsrInput.pbtxt index e13cbfcff32417..fbf266b10e35fa 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradAndCsrInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradAndCsrInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulGradWithAdagradAndCsrInput" input_arg { name: "row_pointers" diff 
--git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize.pbtxt index 182bfdbf3bcab2..359a038ea9b6b7 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSize" input_arg { name: "row_pointers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.pbtxt index e6f2eed0c1d75b..5150a4f23b598f 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInput" input_arg { name: "row_pointers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize.pbtxt index 3bd492cc55a70c..4fd6fa9bb5a5b2 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSize" input_arg { name: "row_pointers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdamAndCsrInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdamAndCsrInput.pbtxt index 202e6f4f4f7b09..aaa27b25954a9e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdamAndCsrInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdamAndCsrInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulGradWithAdamAndCsrInput" input_arg { name: "row_pointers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize.pbtxt index f058e30f800527..5024f72b5c66cb 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulGradWithAdamAndStaticBufferSize" input_arg { name: "row_pointers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithFtrlAndCsrInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithFtrlAndCsrInput.pbtxt index 96121a6bab883d..261f25bebfd7df 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithFtrlAndCsrInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithFtrlAndCsrInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulGradWithFtrlAndCsrInput" input_arg { name: "row_pointers" 
diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize.pbtxt index 23c6d671dd7914..f2f57f2f744d7b 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSize" input_arg { name: "row_pointers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithSgdAndCsrInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithSgdAndCsrInput.pbtxt index 3ad518fadb629e..9446a6fa98c515 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithSgdAndCsrInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithSgdAndCsrInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulGradWithSgdAndCsrInput" input_arg { name: "row_pointers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize.pbtxt index 46a1fe8d6e2f6a..dbb06c95f6d643 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize" input_arg { name: "row_pointers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulWithCsrInput.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulWithCsrInput.pbtxt index 1aa1743718a32a..2b4bc1dcba74ac 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulWithCsrInput.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulWithCsrInput.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulWithCsrInput" input_arg { name: "row_pointers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulWithStaticBufferSize.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulWithStaticBufferSize.pbtxt index 85888d026c595c..471ded1635244a 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulWithStaticBufferSize.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSparseDenseMatmulWithStaticBufferSize.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSparseDenseMatmulWithStaticBufferSize" input_arg { name: "row_pointers" diff --git a/tensorflow/core/ops/compat/ops_history_v2/XlaSplitND.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/XlaSplitND.pbtxt index d4d5391b1340df..353b6e166d0f2e 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/XlaSplitND.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/XlaSplitND.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "XlaSplitND" input_arg { name: "input" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Xlog1py.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Xlog1py.pbtxt index f37a09eea17228..9fe466c2fb8165 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Xlog1py.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Xlog1py.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Xlog1py" input_arg { name: "x" diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/Xlogy.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Xlogy.pbtxt index 8da356ec49fe8c..8e7df823ff73dc 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Xlogy.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Xlogy.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Xlogy" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ZerosLike.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ZerosLike.pbtxt index c1200273821a51..5bb8d0ab3781d5 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ZerosLike.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ZerosLike.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ZerosLike" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/Zeta.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/Zeta.pbtxt index 7f86bd29b404cd..c391bd1f22c6a1 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/Zeta.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/Zeta.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "Zeta" input_arg { name: "x" diff --git a/tensorflow/core/ops/compat/ops_history_v2/ZipDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ZipDataset.pbtxt index 60efabb2d58f62..5dd34535a666d3 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ZipDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ZipDataset.pbtxt @@ -1,4 +1,4 @@ -op { +op { name: "ZipDataset" input_arg { name: "input_datasets" diff --git a/tensorflow/core/ops/mkl_nn_ops.cc b/tensorflow/core/ops/mkl_nn_ops.cc index 868e15113b406c..14a42f29ddecbd 100644 --- a/tensorflow/core/ops/mkl_nn_ops.cc +++ b/tensorflow/core/ops/mkl_nn_ops.cc @@ -1937,6 +1937,46 @@ operation. expected to invoke these operators. )doc"); +REGISTER_OP("_QuantizedMatMul") + // Variable number of inputs depending on fusion. The inputs contain + // quantized or real tensors. Some of the inputs carry min-max values for + // quantized tensors. + .Input("device_inputs: Tdevice_inputs") + .Input("host_inputs: Thost_inputs") + // Variable number of outputs depending on the main output type. For + // example, quantized output will need additional tensors to carry min-max + // values. If the output type is real tensor (e.g. Dequantize fusion), the + // op should produce only single output tensor. + .Output("device_outputs: Tdevice_outputs") + .Output("host_outputs: Thost_outputs") + .Attr("Tdevice_inputs: list(type) >= 0 = []") + .Attr("Thost_inputs: list(type) >= 0 = []") + .Attr("Tdevice_outputs: list(type) >= 0 = []") + .Attr("Thost_outputs: list(type) >= 0 = []") + // The following attributes T1, T2, U, and Tout are members of Tinputs + // and Toutputs, used here for type constraints in the templatized OpKernel + // registrations. + .Attr("T1: quantizedtype") // 0-th input     + .Attr("T2: quantizedtype") // 1st input + .Attr("Tbias: {bfloat16, float, quantizedtype} = DT_FLOAT") + // Additional inputs' type. Currently, restricting all to be of same type. + .Attr("U: {bfloat16, float, quantizedtype} = DT_FLOAT") + .Attr("Tout: {bfloat16, float, quantizedtype} = DT_FLOAT") // 0-th output   + .Attr("transpose_a: bool = false") + .Attr("transpose_b: bool = false") + .Attr("is_weight_const: bool = true") + .Attr("is_bias_const: bool = true") + .Attr("fused_ops: list(string) = []") + // Attribute for quantization mode of all quantized input tensors. + // Currently restricting all operands using same quantization mode. 
+ .Attr("input_quant_mode: {'MIN_FIRST', 'SCALED'} = 'SCALED'") + // Attribute for activation (0-th output) requnatization mode + .Attr("output_quant_mode: {'MIN_FIRST', 'SCALED'} = 'SCALED'") + // Attributes for the LeakyRelu ----------------------------------------- // + .Attr("leakyrelu_alpha: float = 0.2") + // ---------------------------------------------------------------------- // + .SetShapeFn(shape_inference::MatMulShape); + } // namespace tensorflow #endif // INTEL_MKL diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt index 2c64cab5e211e2..042577243a4e90 100644 --- a/tensorflow/core/ops/ops.pbtxt +++ b/tensorflow/core/ops/ops.pbtxt @@ -1,4 +1,3 @@ -go/nodeserialize op { name: "Abort" attr { @@ -60061,6 +60060,8 @@ op { type: DT_DOUBLE type: DT_INT32 type: DT_INT64 + type: DT_UINT32 + type: DT_UINT64 } } } diff --git a/tensorflow/core/ops/parsing_ops.cc b/tensorflow/core/ops/parsing_ops.cc index d765455b6f1864..a3e801a87099c7 100644 --- a/tensorflow/core/ops/parsing_ops.cc +++ b/tensorflow/core/ops/parsing_ops.cc @@ -498,7 +498,7 @@ REGISTER_OP("DecodeCSV") REGISTER_OP("StringToNumber") .Input("string_tensor: string") .Output("output: out_type") - .Attr("out_type: {float, double, int32, int64} = DT_FLOAT") + .Attr("out_type: {float, double, int32, int64, uint32, uint64} = DT_FLOAT") .SetShapeFn(shape_inference::UnchangedShape); } // namespace tensorflow diff --git a/tensorflow/core/platform/file_system_test.cc b/tensorflow/core/platform/file_system_test.cc index 2ecc93f11ab507..1c4d978f13ca6c 100644 --- a/tensorflow/core/platform/file_system_test.cc +++ b/tensorflow/core/platform/file_system_test.cc @@ -38,7 +38,7 @@ class InterPlanetaryFileSystem : public NullFileSystem { string parsed_path; ParsePath(fname, &parsed_path); if (BodyExists(parsed_path)) { - return OkStatus(); + return absl::OkStatus(); } return Status(absl::StatusCode::kNotFound, "File does not exist"); } @@ -58,13 +58,13 @@ class InterPlanetaryFileSystem : public NullFileSystem { return Status(absl::StatusCode::kInvalidArgument, "Bad dirname"); } if (split_path.empty()) { - return OkStatus(); + return absl::OkStatus(); } if (split_path.size() == 1) { celestial_bodies_[""].insert(parsed_path); celestial_bodies_.insert( std::pair>(parsed_path, {})); - return OkStatus(); + return absl::OkStatus(); } if (split_path.size() == 2) { if (!BodyExists(split_path[0])) { @@ -74,7 +74,7 @@ class InterPlanetaryFileSystem : public NullFileSystem { celestial_bodies_[split_path[0]].insert(split_path[1]); celestial_bodies_.insert( std::pair>(parsed_path, {})); - return OkStatus(); + return absl::OkStatus(); } if (split_path.size() == 3) { const string& parent_path = this->JoinPath(split_path[0], split_path[1]); @@ -85,7 +85,7 @@ class InterPlanetaryFileSystem : public NullFileSystem { celestial_bodies_[parent_path].insert(split_path[2]); celestial_bodies_.insert( std::pair>(parsed_path, {})); - return OkStatus(); + return absl::OkStatus(); } return Status(absl::StatusCode::kFailedPrecondition, "Failed to create"); } @@ -102,7 +102,7 @@ class InterPlanetaryFileSystem : public NullFileSystem { return Status(absl::StatusCode::kFailedPrecondition, "Not a dir"); } if (celestial_bodies_.find(parsed_path) != celestial_bodies_.end()) { - return OkStatus(); + return absl::OkStatus(); } return Status(absl::StatusCode::kFailedPrecondition, "Not a dir"); } @@ -114,7 +114,7 @@ class InterPlanetaryFileSystem : public NullFileSystem { ParsePath(dir, &parsed_path); result->insert(result->begin(), 
celestial_bodies_[parsed_path].begin(), celestial_bodies_[parsed_path].end()); - return OkStatus(); + return absl::OkStatus(); } private: @@ -279,7 +279,7 @@ class TestFileSystem : public NullFileSystem { // Only allow for a single root directory. Status IsDirectory(const string& dirname, TransactionToken* token) override { if (dirname == "." || dirname.empty()) { - return OkStatus(); + return absl::OkStatus(); } return Status(absl::StatusCode::kFailedPrecondition, "Not a dir"); } @@ -290,7 +290,7 @@ class TestFileSystem : public NullFileSystem { if (dir == "." || dir.empty()) { result->push_back("test"); } - return OkStatus(); + return absl::OkStatus(); } }; diff --git a/tensorflow/core/profiler/convert/trace_viewer/BUILD b/tensorflow/core/profiler/convert/trace_viewer/BUILD index 8b2ff03a343174..bdb65463505483 100644 --- a/tensorflow/core/profiler/convert/trace_viewer/BUILD +++ b/tensorflow/core/profiler/convert/trace_viewer/BUILD @@ -118,6 +118,7 @@ cc_library( "@com_google_absl//absl/status", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:optional", + "@local_tsl//tsl/lib/io:iterator", "@local_tsl//tsl/platform:status", "@local_tsl//tsl/profiler/utils:timespan", ], diff --git a/tensorflow/core/profiler/convert/trace_viewer/trace_events.cc b/tensorflow/core/profiler/convert/trace_viewer/trace_events.cc index 35ac11941f4d0e..c514988f8f2b12 100644 --- a/tensorflow/core/profiler/convert/trace_viewer/trace_events.cc +++ b/tensorflow/core/profiler/convert/trace_viewer/trace_events.cc @@ -28,6 +28,7 @@ limitations under the License. #include "absl/base/internal/endian.h" #include "absl/log/check.h" #include "absl/log/log.h" +#include "absl/status/status.h" #include "absl/strings/string_view.h" #include "tensorflow/core/platform/file_system.h" #include "tensorflow/core/platform/macros.h" @@ -36,6 +37,7 @@ limitations under the License. #include "tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.h" #include "tensorflow/core/profiler/protobuf/trace_events.pb.h" #include "tensorflow/core/profiler/protobuf/trace_events_raw.pb.h" +#include "tsl/lib/io/iterator.h" #include "tsl/lib/io/table.h" #include "tsl/lib/io/table_builder.h" #include "tsl/lib/io/table_options.h" @@ -170,6 +172,36 @@ std::vector> GetEventsByLevel( return events_by_level; } +absl::Status ReadFileTraceMetadata(std::string& filepath, Trace* trace) { + // 1. Open the file. + uint64_t file_size; + TF_RETURN_IF_ERROR(tsl::Env::Default()->GetFileSize(filepath, &file_size)); + + tsl::FileSystem* file_system; + TF_RETURN_IF_ERROR( + tsl::Env::Default()->GetFileSystemForFile(filepath, &file_system)); + + std::unique_ptr file; + TF_RETURN_IF_ERROR(file_system->NewRandomAccessFile(filepath, &file)); + + tsl::table::Options options; + options.block_size = 20 * 1024 * 1024; + tsl::table::Table* table = nullptr; + TF_RETURN_IF_ERROR( + tsl::table::Table::Open(options, file.get(), file_size, &table)); + std::unique_ptr table_deleter(table); + + std::unique_ptr iterator(table->NewIterator()); + if (iterator == nullptr) return absl::UnknownError("Could not open table"); + + // 2. Read the metadata. + iterator->SeekToFirst(); + if (!ReadTraceMetadata(iterator.get(), kTraceMetadataKey, trace)) { + return absl::UnknownError("Could not parse Trace proto"); + } + return absl::OkStatus(); +} + // Store the contents of this container in an sstable file. The format is as // follows: // @@ -265,7 +297,8 @@ tsl::Status DoLoadFromLevelDbTable( // Read the metadata. 
iterator->SeekToFirst(); if (!ReadTraceMetadata(iterator.get(), kTraceMetadataKey, &trace)) { - return tsl::errors::Unknown("Could not parse Trace proto"); + return absl::UnknownError( + "Could not parse Trace proto to read trace metadata"); } if (filter) filter->SetUp(trace); diff --git a/tensorflow/core/profiler/convert/trace_viewer/trace_events.h b/tensorflow/core/profiler/convert/trace_viewer/trace_events.h index 68a27c22f86b67..fb2ae7766a6c58 100644 --- a/tensorflow/core/profiler/convert/trace_viewer/trace_events.h +++ b/tensorflow/core/profiler/convert/trace_viewer/trace_events.h @@ -37,6 +37,7 @@ limitations under the License. #include "tensorflow/core/profiler/lib/context_types.h" #include "tensorflow/core/profiler/protobuf/task.pb.h" #include "tensorflow/core/profiler/protobuf/trace_events.pb.h" +#include "tsl/lib/io/table.h" #include "tsl/platform/errors.h" #include "tsl/platform/file_system.h" #include "tsl/platform/status.h" @@ -65,6 +66,9 @@ tsl::Status DoLoadFromLevelDbTable( const std::function& copy_event_to_arena, const std::function& add_arena_event); +// Reads the trace metadata from a file with given path +absl::Status ReadFileTraceMetadata(std::string& filepath, Trace* trace); + std::vector> GetEventsByLevel( const Trace& trace, std::vector& events); diff --git a/tensorflow/core/profiler/utils/xplane_utils.h b/tensorflow/core/profiler/utils/xplane_utils.h index f78350760106b2..75ed0d1b3ed330 100644 --- a/tensorflow/core/profiler/utils/xplane_utils.h +++ b/tensorflow/core/profiler/utils/xplane_utils.h @@ -27,6 +27,7 @@ namespace profiler { using tsl::profiler::AddFlowsToXplane; // NOLINT using tsl::profiler::AggregateXPlane; // NOLINT +using tsl::profiler::FindLinesWithId; // NOLINT using tsl::profiler::FindLineWithId; // NOLINT using tsl::profiler::FindLineWithName; // NOLINT using tsl::profiler::FindMutablePlanes; // NOLINT diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index ad2911fae4c67f..c436412c0e88c6 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -108,7 +108,7 @@ limitations under the License. #define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0 #define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0 -#define TF_GRAPH_DEF_VERSION 1860 // Updated: 2024/5/12 +#define TF_GRAPH_DEF_VERSION 1869 // Updated: 2024/5/21 // Checkpoint compatibility versions (the versions field in SavedSliceMeta). 
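The trace_events.h hunk above declares the new ReadFileTraceMetadata helper, which opens the sstable-backed trace file and parses only the Trace metadata entry rather than the full event stream. A minimal call-site sketch, assuming the tensorflow::profiler namespace and a placeholder file path:

#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/profiler/convert/trace_viewer/trace_events.h"
#include "tensorflow/core/profiler/protobuf/trace_events.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"

absl::Status LogTraceMetadata() {
  // Hypothetical path; the helper takes a non-const std::string&.
  std::string filepath = "/tmp/profile/trace_events.sstable";
  tensorflow::profiler::Trace trace;
  TF_RETURN_IF_ERROR(
      tensorflow::profiler::ReadFileTraceMetadata(filepath, &trace));
  LOG(INFO) << "Loaded metadata for " << trace.devices_size() << " devices";
  return absl::OkStatus();
}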
// diff --git a/tensorflow/core/runtime_fallback/conversion/BUILD b/tensorflow/core/runtime_fallback/conversion/BUILD index 9b90ab2aa3d062..bf533491c77136 100644 --- a/tensorflow/core/runtime_fallback/conversion/BUILD +++ b/tensorflow/core/runtime_fallback/conversion/BUILD @@ -29,11 +29,15 @@ cc_library( srcs = ["conversion.cc"], hdrs = ["conversion.h"], deps = [ + "//tensorflow/core:framework", "//tensorflow/core:protos_all_cc", + "//tensorflow/core/common_runtime/eager:tensor_handle", + "//tensorflow/core/framework:tensor", + "//tensorflow/core/platform:status", "//tensorflow/core/runtime_fallback/kernel:kernel_fallback_tensor", "//tensorflow/core/runtime_fallback/kernel:tensor_util", + "//tensorflow/core/runtime_fallback/runtime:kernel_utils", "//tensorflow/core/runtime_fallback/runtime:runtime_fallback_tensor", - "//tensorflow/core/tfrt/utils:error_util", "@tf_runtime//:hostcontext", "@tf_runtime//:tensor", ], diff --git a/tensorflow/core/runtime_fallback/conversion/conversion.cc b/tensorflow/core/runtime_fallback/conversion/conversion.cc index 71d97860aaa0af..f7a8fe018d18e2 100644 --- a/tensorflow/core/runtime_fallback/conversion/conversion.cc +++ b/tensorflow/core/runtime_fallback/conversion/conversion.cc @@ -20,11 +20,15 @@ limitations under the License. #include +#include "tensorflow/core/common_runtime/eager/tensor_handle.h" +#include "tensorflow/core/framework/device.h" +#include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/platform/status.h" #include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_tensor.h" #include "tensorflow/core/runtime_fallback/kernel/tensor_util.h" +#include "tensorflow/core/runtime_fallback/runtime/kernel_utils.h" #include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_tensor.h" -#include "tensorflow/core/tfrt/utils/error_util.h" #include "tfrt/host_context/async_value_ref.h" // from @tf_runtime #include "tfrt/host_context/device.h" // from @tf_runtime #include "tfrt/tensor/conversion_registry.h" // from @tf_runtime diff --git a/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.cc b/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.cc index d056f90d45638d..aa48bcf6be10f0 100644 --- a/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.cc +++ b/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.cc @@ -286,7 +286,7 @@ tfrt::AsyncValueRef KernelFallbackExecuteCompatCoreRuntimeDispatch( // TODO(b/176997538): Skip checking dtypes for tf._BatchFunctionFallback op // due to b/176997538. Remove the skipping once the SavedModel lowering // problem is fixed. 
- if (!status.ok() && !op_name.equals("_BatchFunctionFallback")) { + if (!status.ok() && op_name != "_BatchFunctionFallback") { KernelFallbackEmitError(exec_ctx, &fallback_request_state, op_name, &op_chain, results, status); return op_chain; diff --git a/tensorflow/core/runtime_fallback/util/attr_util_test.cc b/tensorflow/core/runtime_fallback/util/attr_util_test.cc index b3febbb9373774..91811eb63dd01c 100644 --- a/tensorflow/core/runtime_fallback/util/attr_util_test.cc +++ b/tensorflow/core/runtime_fallback/util/attr_util_test.cc @@ -283,6 +283,7 @@ TEST(UtilsTest, FillAttrValueMapOk) { attrs.SetArray("shape", tfrt::ArrayRef{2, 2}); attrs.SetArray("values", tfrt::ArrayRef{2}); attrs.SetArray("flags", tfrt::ArrayRef{false, true}); + attrs.SetArray("baz", tfrt::ArrayRef{'a'}); attrs.Set("transpose_a", false); attrs.Set("transpose_b", true); @@ -290,6 +291,9 @@ TEST(UtilsTest, FillAttrValueMapOk) { attrs.Set("foo", 2); attrs.Set("bar", 2); + tfrt::AggregateAttr aggAttr; + attrs.Set("aggAttr", aggAttr); + AttrValueMap map; auto host_context = CreateTestHostContext(); @@ -303,10 +307,12 @@ TEST(UtilsTest, FillAttrValueMapOk) { Pair(Eq("shape"), EqualsProto(R"pb(list { i: 2 i: 2 })pb")), Pair(Eq("values"), EqualsProto(R"pb(list { f: 2 })pb")), Pair(Eq("flags"), EqualsProto(R"pb(list { b: false b: true })pb")), + Pair(Eq("baz"), EqualsProto(R"pb(s: "a")pb")), Pair(Eq("transpose_a"), EqualsProto(R"pb(b: false)pb")), Pair(Eq("transpose_b"), EqualsProto(R"pb(b: true)pb")), Pair(Eq("foo"), EqualsProto(R"pb(f: 2)pb")), - Pair(Eq("bar"), EqualsProto(R"pb(i: 2)pb")))); + Pair(Eq("bar"), EqualsProto(R"pb(i: 2)pb")), + Pair(Eq("aggAttr"), EqualsProto(R"pb(list {})pb")))); } } // namespace diff --git a/tensorflow/core/tfrt/common/BUILD b/tensorflow/core/tfrt/common/BUILD index 2de9b7fcf904b3..8129cdb0ea0f65 100644 --- a/tensorflow/core/tfrt/common/BUILD +++ b/tensorflow/core/tfrt/common/BUILD @@ -70,6 +70,19 @@ cc_library( ], ) +tf_cc_test( + name = "async_value_tensor_test", + srcs = ["async_value_tensor_test.cc"], + deps = [ + ":async_value_tensor", + "//tensorflow/core:framework", + "//tensorflow/core:protos_all_cc", + "@com_google_googletest//:gtest_main", + "@local_xla//xla/pjrt:pjrt_client", + "@local_xla//xla/tsl/concurrency:async_value", + ], +) + cc_library( name = "pjrt_state", srcs = [ @@ -163,6 +176,8 @@ tf_cc_test( "//tensorflow/core:framework", "@local_tsl//tsl/lib/core:status_test_util", "@local_tsl//tsl/platform:status_matchers", + "@local_tsl//tsl/platform:statusor", + "@local_tsl//tsl/platform:test", "@local_tsl//tsl/platform:test_main", "@local_tsl//tsl/protobuf:error_codes_proto_impl_cc", "@local_xla//xla/pjrt:tfrt_cpu_pjrt_client", diff --git a/tensorflow/core/tfrt/common/async_value_tensor_test.cc b/tensorflow/core/tfrt/common/async_value_tensor_test.cc new file mode 100644 index 00000000000000..a7c89af53c9da7 --- /dev/null +++ b/tensorflow/core/tfrt/common/async_value_tensor_test.cc @@ -0,0 +1,76 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/core/tfrt/common/async_value_tensor.h" + +#include +#include + +#include +#include "xla/pjrt/pjrt_client.h" +#include "xla/tsl/concurrency/async_value_ref.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/framework/types.pb.h" + +namespace tensorflow { +namespace { + +TEST(AsyncValueTensorTest, InvalidTensor) { + tensorflow::Tensor tensor(tensorflow::DT_INT64, tensorflow::TensorShape({1})); + + AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor); + + ASSERT_EQ(avt, nullptr); +} + +TEST(AsyncValueTensorTest, SetAndGetAsyncValue) { + AsyncValueAllocator allocator; + tensorflow::Tensor tensor(&allocator, tensorflow::DT_INT64, + tensorflow::TensorShape({1})); + + AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor); + + ASSERT_NE(avt, nullptr); + + tsl::AsyncValueRef value = + tsl::MakeConstructedAsyncValueRef(123); + + avt->SetAsyncRef(value.CopyRCRef()); + + auto ret_value = avt->GetAsyncRef(); + ASSERT_EQ(ret_value, value.CopyRCRef()); +} + +TEST(AsyncValueTensorTest, SetAndGetBuffer) { + AsyncValueAllocator allocator; + tensorflow::Tensor tensor(&allocator, tensorflow::DT_INT64, + tensorflow::TensorShape({1})); + + AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor); + + ASSERT_NE(avt, nullptr); + + std::shared_ptr buffer; + + avt->SetBuffer(buffer); + + auto ret_buffer = avt->GetBuffer(); + + ASSERT_EQ(ret_buffer, buffer); +} + +} // namespace +} // namespace tensorflow diff --git a/tensorflow/core/tfrt/common/pjrt_util_test.cc b/tensorflow/core/tfrt/common/pjrt_util_test.cc index 68cc2e98dd1a5d..f8de14dd034812 100644 --- a/tensorflow/core/tfrt/common/pjrt_util_test.cc +++ b/tensorflow/core/tfrt/common/pjrt_util_test.cc @@ -14,6 +14,9 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/core/tfrt/common/pjrt_util.h" +#include +#include + #include "xla/pjrt/tfrt_cpu_pjrt_client.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/types.h" @@ -21,11 +24,14 @@ limitations under the License. 
#include "tensorflow/core/tfrt/common/pjrt_state.h" #include "tsl/lib/core/status_test_util.h" #include "tsl/platform/status_matchers.h" +#include "tsl/platform/statusor.h" +#include "tsl/platform/test.h" #include "tsl/protobuf/error_codes.pb.h" namespace tensorflow { namespace { +using ::testing::ElementsAre; using ::testing::HasSubstr; using ::tsl::testing::StatusIs; @@ -44,5 +50,17 @@ TEST(PjRtStateResourceManagerTest, SetNullPjRtClient) { StatusIs(error::INVALID_ARGUMENT, HasSubstr("PJRT client is nullptr"))); } +TEST(PjRtGpuClientCreationInfoTest, SetAndGet) { + auto info = std::make_unique(); + info->allowed_devices.insert(123); + TF_ASSERT_OK( + SetPjRtGpuClientCreationInfoInTFGlobalResourceManager(std::move(info))); + + TF_ASSERT_OK_AND_ASSIGN(PjRtGpuClientCreationInfo * retrieved_info, + GetPjRtGpuClientCreationInfo()); + + EXPECT_THAT(retrieved_info->allowed_devices, ElementsAre(123)); +} + } // namespace } // namespace tensorflow diff --git a/tensorflow/core/tfrt/fallback/BUILD b/tensorflow/core/tfrt/fallback/BUILD index 454c102deec081..c77b4133def642 100644 --- a/tensorflow/core/tfrt/fallback/BUILD +++ b/tensorflow/core/tfrt/fallback/BUILD @@ -57,10 +57,13 @@ tf_cc_test( srcs = ["fallback_state_test.cc"], deps = [ ":fallback_state", - "//tensorflow/core:framework", + "//tensorflow/cc:cc_ops", + "//tensorflow/cc:const_op", + "//tensorflow/cc:ops", + "//tensorflow/cc:scope", + "//tensorflow/core:all_kernels", "//tensorflow/core:test", "//tensorflow/core:test_main", - "//tensorflow/core/framework:function_proto_cc", "//tensorflow/core/platform:status_matchers", "//tensorflow/core/protobuf:error_codes_proto_impl_cc", ], diff --git a/tensorflow/core/tfrt/fallback/fallback_state.h b/tensorflow/core/tfrt/fallback/fallback_state.h index ae5158018b9b67..cf293b1b406a28 100644 --- a/tensorflow/core/tfrt/fallback/fallback_state.h +++ b/tensorflow/core/tfrt/fallback/fallback_state.h @@ -62,6 +62,7 @@ class FallbackState { const SessionOptions &session_options() const { return session_options_; } const DeviceMgr &device_manager() const { return device_manager_; } + DeviceMgr &device_manager() { return device_manager_; } const DeviceSet &device_set() const { return device_set_; } diff --git a/tensorflow/core/tfrt/fallback/fallback_state_test.cc b/tensorflow/core/tfrt/fallback/fallback_state_test.cc index 352a5119e69460..d7d55311e7ffd4 100644 --- a/tensorflow/core/tfrt/fallback/fallback_state_test.cc +++ b/tensorflow/core/tfrt/fallback/fallback_state_test.cc @@ -14,11 +14,15 @@ limitations under the License. 
==============================================================================*/ #include "tensorflow/core/tfrt/fallback/fallback_state.h" -#include "tensorflow/core/framework/function.h" -#include "tensorflow/core/framework/function.pb.h" +#include + +#include "tensorflow/cc/framework/ops.h" +#include "tensorflow/cc/framework/scope.h" +#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/error_codes.pb.h" +#include "tsl/lib/core/status_test_util.h" namespace tensorflow { namespace { @@ -54,5 +58,37 @@ TEST(FallbackStateTest, CreateRendezvous) { HasSubstr("rendezvous")))); } +TEST(FallbackStateTest, CreateGraphExecutionState) { + tensorflow::SessionOptions session_options; + tensorflow::FunctionDefLibrary fdef_lib; + TF_ASSERT_OK_AND_ASSIGN( + auto fallback_state, + tfrt_stub::FallbackState::CreateWithCpuDevice(session_options, fdef_lib)); + + GraphDef graphdef; + { + auto scope = tensorflow::Scope::NewRootScope().WithDevice( + "/job:localhost/replica:0/task:0/device:CPU:0"); + + Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1}); + + TF_ASSERT_OK(scope.ToGraphDef(&graphdef)); + } + + TF_ASSERT_OK_AND_ASSIGN( + auto graph_execution_state, + fallback_state->CreateGraphExecutionState(std::move(graphdef))); +} + +TEST(FallbackStateTest, CreateWithMockGpuDevice) { + tensorflow::SessionOptions session_options; + tensorflow::FunctionDefLibrary fdef_lib; + TF_ASSERT_OK_AND_ASSIGN(auto fallback_state, + tfrt_stub::FallbackState::CreateWithMockGpuDevice( + session_options, fdef_lib)); + const auto& device_manager = fallback_state->device_manager(); + EXPECT_GT(device_manager.NumDeviceType("GPU"), 0); +} + } // namespace } // namespace tensorflow diff --git a/tensorflow/core/tfrt/gpu/kernel/BUILD b/tensorflow/core/tfrt/gpu/kernel/BUILD index 4c9046644c93c2..16599c77cbd4e0 100644 --- a/tensorflow/core/tfrt/gpu/kernel/BUILD +++ b/tensorflow/core/tfrt/gpu/kernel/BUILD @@ -79,9 +79,12 @@ tf_cuda_cc_test( name = "gpu_runner_test", srcs = ["gpu_runner_test.cc"], tags = [ - "config-cuda-only", + "gpu", # Only enables test on GPU. "no_oss", # This test only runs with GPU. - "requires-gpu-nvidia", + "noasan", + "nomsan", + "noopt", + "notsan", ], deps = [ ":gpu_runner", @@ -102,6 +105,7 @@ tf_cuda_cc_test( "//tensorflow/core/tfrt/common:pjrt_util", "//tensorflow/core/tfrt/fallback:fallback_state", "@com_google_googletest//:gtest_main", + "@local_tsl//tsl/framework:serving_device_selector_policies", "@tf_runtime//:hostcontext", "@tf_runtime//:tensor", ], diff --git a/tensorflow/core/tfrt/gpu/kernel/gpu_runner_test.cc b/tensorflow/core/tfrt/gpu/kernel/gpu_runner_test.cc index e2b999cb23ec06..7371f62df1f20b 100644 --- a/tensorflow/core/tfrt/gpu/kernel/gpu_runner_test.cc +++ b/tensorflow/core/tfrt/gpu/kernel/gpu_runner_test.cc @@ -15,6 +15,8 @@ limitations under the License. #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/tfrt/gpu/kernel/gpu_runner.h" +#include + #include #include #include "tensorflow/cc/framework/scope.h" @@ -145,7 +147,7 @@ class GpuRunnerTest : public ::testing::Test { exec_ctx_ = std::make_unique(std::move(*req_ctx)); // Create a gpu runner. 
- auto policy = std::make_unique(); + auto policy = std::make_unique(); serving_device_selector_ = std::make_unique( kNumVirtualGpuDevices, std::move(policy)); gpu_runner_ = std::make_unique(serving_device_selector_.get()); diff --git a/tensorflow/core/tfrt/ifrt/BUILD b/tensorflow/core/tfrt/ifrt/BUILD index 369fecac053dbe..8b3d61c8bbbc12 100644 --- a/tensorflow/core/tfrt/ifrt/BUILD +++ b/tensorflow/core/tfrt/ifrt/BUILD @@ -95,6 +95,7 @@ cc_library( "@local_xla//xla/pjrt:host_callback", "@local_xla//xla/pjrt:pjrt_executable", "@local_xla//xla/python/ifrt", + "@local_xla//xla/python/ifrt/hlo:hlo_program", "@local_xla//xla/python/pjrt_ifrt", "@local_xla//xla/python/pjrt_ifrt:xla_ifrt", "@local_xla//xla/service:computation_placer_hdr", @@ -145,6 +146,7 @@ cc_library( srcs = ["ifrt_loaded_variable_registry.cc"], hdrs = ["ifrt_loaded_variable_registry.h"], deps = [ + "@com_google_absl//absl/base:core_headers", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/functional:any_invocable", @@ -170,15 +172,11 @@ cc_library( ":ifrt_serving_core_selector", "//tensorflow/compiler/tf2xla:xla_helpers", "//tensorflow/core:core_cpu_base", - "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/status", - "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/strings", - "@com_google_absl//absl/synchronization", "@local_tsl//tsl/platform:env", "@local_tsl//tsl/platform:errors", "@local_xla//xla/python/ifrt", - "@local_xla//xla/tsl/concurrency:ref_count", "@tf_runtime//:hostcontext", ], ) @@ -219,7 +217,6 @@ cc_library( "//tensorflow/core:framework", "//tensorflow/core:protos_all_cc", "//tensorflow/core/platform:status", - "//tensorflow/core/platform:statusor", "//tensorflow/core/tpu/kernels:sharding_utils", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/log", @@ -232,7 +229,6 @@ cc_library( "@local_tsl//tsl/platform:env", "@local_tsl//tsl/platform:errors", "@local_tsl//tsl/platform:statusor", - "@local_xla//xla:executable_run_options", "@local_xla//xla/hlo/ir:hlo", "@local_xla//xla/python/ifrt", "@local_xla//xla/python/pjrt_ifrt:xla_ifrt", @@ -298,6 +294,61 @@ cc_library( ], ) +cc_library( + name = "ifrt_serving_executable_test_util", + testonly = True, + srcs = ["ifrt_serving_executable_test_util.cc"], + hdrs = ["ifrt_serving_executable_test_util.h"], + data = [ + "//tensorflow/core/tfrt/ifrt/testdata", + ], + deps = [ + ":ifrt_loaded_variable_registry", + ":ifrt_restore_tensor_registry", + ":ifrt_serving_core_selector", + ":ifrt_serving_executable", + ":tf_host_callback", + "//tensorflow/compiler/mlir/tensorflow", + "//tensorflow/compiler/tf2xla:xla_helpers", + "//tensorflow/core:core_cpu_base", + "//tensorflow/core/framework:tensor", + "//tensorflow/core/framework:types_proto_cc", + "//tensorflow/core/platform:resource_loader", + "@com_google_absl//absl/status:statusor", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:string_view", + "@llvm-project//mlir:AllPassesAndDialects", + "@llvm-project//mlir:IR", + "@llvm-project//mlir:Parser", + "@local_tsl//tsl/framework/test_util:mock_serving_device_selector", + "@local_tsl//tsl/platform:env", + "@local_tsl//tsl/platform:status", + "@local_xla//xla/python/ifrt", + "@local_xla//xla/python/ifrt:test_util", + "@tf_runtime//:hostcontext", + ], +) + +tf_cc_test( + name = "ifrt_restore_tensor_registry_test", + srcs = ["ifrt_restore_tensor_registry_test.cc"], + tags = ["no_oss"], + deps = [ + ":ifrt_restore_tensor_registry", 
+ "//tensorflow/compiler/mlir/tfrt/transforms/ifrt:ifrt_types", + "//tensorflow/core:framework", + "//tensorflow/core/framework:tensor", + "//tensorflow/core/framework:tensor_testutil", + "//tensorflow/core/framework:types_proto_cc", + "@com_google_absl//absl/status", + "@com_google_googletest//:gtest_main", + "@local_tsl//tsl/lib/core:status_test_util", + "@local_tsl//tsl/platform:status_matchers", + "@local_tsl//tsl/platform:statusor", + "@local_xla//xla/python/ifrt", + ], +) + tf_cc_test( name = "ifrt_loaded_variable_utils_test", srcs = ["ifrt_loaded_variable_utils_test.cc"], @@ -397,13 +448,10 @@ tf_cc_test( ], tags = ["no_oss"], deps = [ - ":ifrt_loaded_variable_registry", ":ifrt_restore_tensor_registry", ":ifrt_serving_core_selector", ":ifrt_serving_executable", - ":sharding_utils", - ":tf_host_callback", - "//tensorflow/compiler/mlir/tensorflow", + ":ifrt_serving_executable_test_util", "//tensorflow/compiler/tf2xla:xla_helpers", "//tensorflow/core:core_cpu_base", "//tensorflow/core:framework", @@ -415,29 +463,22 @@ tf_cc_test( "//tensorflow/core/framework:tensor_matcher", "//tensorflow/core/framework:tensor_testutil", "//tensorflow/core/framework:types_proto_cc", - "//tensorflow/core/platform:resource_loader", "@com_google_absl//absl/log:check", "@com_google_absl//absl/status", "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:span", "@com_google_googletest//:gtest_main", - "@llvm-project//mlir:AllPassesAndDialects", - "@llvm-project//mlir:IR", - "@llvm-project//mlir:Parser", "@local_tsl//tsl/framework:serving_device_selector", "@local_tsl//tsl/framework/test_util:mock_serving_device_selector", "@local_tsl//tsl/platform:env", "@local_tsl//tsl/platform:statusor", "@local_tsl//tsl/platform:tstring", - "@local_xla//xla/hlo/ir:hlo", "@local_xla//xla/python/ifrt", "@local_xla//xla/python/ifrt:test_util", "@local_xla//xla/python/pjrt_ifrt:tfrt_cpu_client_test_lib", - "@local_xla//xla/tsl/concurrency:ref_count", "@tf_runtime//:basic_kernels_alwayslink", "@tf_runtime//:core_runtime_alwayslink", - "@tf_runtime//:hostcontext", "@tf_runtime//:test_kernels_alwayslink", "@tf_runtime//backends/cpu:core_runtime_alwayslink", "@tf_runtime//backends/cpu:tf_ops_alwayslink", @@ -469,6 +510,7 @@ tf_cc_test( "//tensorflow/core/framework:tensor", "//tensorflow/core/framework:types_proto_cc", "//tensorflow/core/platform:resource_loader", + "//tensorflow/core/platform:status_matchers", "@com_google_absl//absl/status", "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/strings", diff --git a/tensorflow/core/tfrt/ifrt/ifrt_config.proto b/tensorflow/core/tfrt/ifrt/ifrt_config.proto index 38c0f6ec0e0902..f11d61049ce666 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_config.proto +++ b/tensorflow/core/tfrt/ifrt/ifrt_config.proto @@ -10,3 +10,11 @@ message VariableDeviceShardingConfigProto { xla.OpSharding sharding = 1; repeated int32 device_ids = 2; } + +enum IfrtServingCoreSelectionPolicy { + // Default policy to select the soonest-to-finish core defined at + // http://shortn/_XQhI1ASAu0. + IFRT_SERVING_CORE_SELECTION_POLICY_DEFAULT = 0; + // Policy that round robin with local ordinal http://shortn/_7BtVe4dkp5. 
+ IFRT_SERVING_CORE_SELECTION_POLICY_LOCAL_ROUND_ROBIN = 1; +} diff --git a/tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h b/tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h index c57f0c0f353387..5275b683027602 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h +++ b/tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h @@ -22,6 +22,7 @@ limitations under the License. #include "absl/base/thread_annotations.h" #include "absl/container/flat_hash_map.h" +#include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/synchronization/mutex.h" #include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h" diff --git a/tensorflow/core/tfrt/ifrt/ifrt_executable_registry_test.cc b/tensorflow/core/tfrt/ifrt/ifrt_executable_registry_test.cc index 5dcdc6e80cca2c..e3ee0e43547771 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_executable_registry_test.cc +++ b/tensorflow/core/tfrt/ifrt/ifrt_executable_registry_test.cc @@ -40,6 +40,7 @@ limitations under the License. #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/resource_loader.h" +#include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h" #include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h" @@ -118,6 +119,44 @@ TEST(IfrtExecutableRegistry, Basic) { ASSERT_EQ(executable_ptr, raw_ptr); } +TEST(IfrtExecutableRegistry, DuplicateRegistrationFails) { + mlir::DialectRegistry registry; + mlir::registerAllDialects(registry); + mlir::RegisterAllTensorFlowDialects(registry); + + mlir::MLIRContext context(registry); + + int64_t program_id = 1234; + + TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr executable, + CreateIfrtServingExecutable(context, program_id)); + TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register( + program_id, std::move(executable))); + + EXPECT_THAT( + ServingExecutableRegistry::Register(program_id, std::move(executable)), + testing::StatusIs(absl::StatusCode::kAlreadyExists)); +} + +TEST(IfrtExecutableRegistry, ReleaseOk) { + mlir::DialectRegistry registry; + mlir::registerAllDialects(registry); + mlir::RegisterAllTensorFlowDialects(registry); + + mlir::MLIRContext context(registry); + + int64_t program_id = 1234; + + TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr executable, + CreateIfrtServingExecutable(context, program_id)); + TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register( + program_id, std::move(executable))); + + handle.Release(); + + EXPECT_EQ(ServingExecutableRegistry::Lookup(program_id), nullptr); +} + TEST(IfrtExecutableRegistry, FreezeOk) { mlir::DialectRegistry registry; mlir::registerAllDialects(registry); @@ -142,6 +181,27 @@ TEST(IfrtExecutableRegistry, FreezeOk) { ASSERT_EQ(executable_ptr, raw_ptr); } +TEST(IfrtExecutableRegistry, FreezeFailedProgramNotRegistered) { + mlir::DialectRegistry registry; + mlir::registerAllDialects(registry); + mlir::RegisterAllTensorFlowDialects(registry); + + mlir::MLIRContext context(registry); + + int64_t program_id = 1234; + + TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr executable, + CreateIfrtServingExecutable(context, program_id)); + + TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register( + program_id, std::move(executable))); + + handle.Release(); + + EXPECT_THAT(handle.Freeze(), + testing::StatusIs(absl::StatusCode::kFailedPrecondition)); +} + TEST(IfrtExecutableRegistry, 
InvalidProgramIdShallReturnNull) { int64_t program_id = 1234; diff --git a/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.cc b/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.cc index 5ef8f3cbf77e03..73651cf7ed5cf1 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.cc +++ b/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.cc @@ -22,7 +22,6 @@ limitations under the License. #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" -#include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "tsl/platform/statusor.h" diff --git a/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h b/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h index 0c66af49a50fc4..e799c571a246b7 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h +++ b/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h @@ -18,6 +18,7 @@ limitations under the License. #include +#include "absl/base/thread_annotations.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/functional/any_invocable.h" diff --git a/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h b/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h index dbf464ce4540d9..4d07d1a3771a8a 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h +++ b/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h @@ -20,6 +20,9 @@ limitations under the License. #include #include "absl/status/status.h" +#include "absl/status/statusor.h" +#include "absl/strings/string_view.h" +#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h" #include "xla/python/ifrt/client.h" #include "tensorflow/core/framework/resource_handle.h" #include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h" diff --git a/tensorflow/core/tfrt/ifrt/ifrt_model_context.cc b/tensorflow/core/tfrt/ifrt/ifrt_model_context.cc index 2d5b6b79002089..191fc765715ba7 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_model_context.cc +++ b/tensorflow/core/tfrt/ifrt/ifrt_model_context.cc @@ -16,12 +16,8 @@ limitations under the License. #include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h" -#include #include "absl/status/status.h" -#include "absl/strings/string_view.h" -#include "xla/python/ifrt/array.h" -#include "xla/tsl/concurrency/ref_count.h" #include "tsl/platform/errors.h" #include "tsl/platform/threadpool.h" diff --git a/tensorflow/core/tfrt/ifrt/ifrt_model_context.h b/tensorflow/core/tfrt/ifrt/ifrt_model_context.h index 9bf38b8c92cb74..6b4a600b85a3cb 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_model_context.h +++ b/tensorflow/core/tfrt/ifrt/ifrt_model_context.h @@ -17,19 +17,14 @@ limitations under the License. 
#define TENSORFLOW_CORE_TFRT_IFRT_IFRT_MODEL_CONTEXT_H_ #include -#include #include #include -#include "absl/container/flat_hash_map.h" #include "absl/status/status.h" -#include "absl/status/statusor.h" #include "absl/strings/string_view.h" -#include "absl/synchronization/mutex.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "xla/python/ifrt/array.h" #include "xla/python/ifrt/client.h" -#include "xla/tsl/concurrency/ref_count.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h" #include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h" @@ -65,12 +60,12 @@ class IfrtModelContext { std::shared_ptr client, IfrtServingCoreSelector* ifrt_serving_core_selector, const tsl::thread::ThreadPool* thread_pool, - std::unique_ptr device_mgr, + tensorflow::DeviceMgr* device_mgr, tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn) : client_(std::move(client)), ifrt_serving_core_selector_(ifrt_serving_core_selector), thread_pool_(*thread_pool), - device_mgr_(std::move(device_mgr)), + device_mgr_(device_mgr), shape_representation_fn_(shape_representation_fn) {} void RegisterHandle(ServingExecutableRegistry::Handle handle) { @@ -100,9 +95,7 @@ class IfrtModelContext { return restore_tensor_registry_; } - tensorflow::StaticDeviceMgr* GetDeviceMgr() const { - return device_mgr_.get(); - } + tensorflow::DeviceMgr* GetDeviceMgr() const { return device_mgr_; } IfrtServingCoreSelector* GetIfrtServingCoreSelector() const { return ifrt_serving_core_selector_; } @@ -127,7 +120,7 @@ class IfrtModelContext { IfrtServingCoreSelector* ifrt_serving_core_selector_; // May be nullptr const tsl::thread::ThreadPool& thread_pool_; - std::unique_ptr device_mgr_; + tensorflow::DeviceMgr* device_mgr_ = nullptr; // Not owned. tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn_ = tensorflow::IdentityShapeRepresentationFn(); diff --git a/tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry_test.cc b/tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry_test.cc new file mode 100644 index 00000000000000..de0a27aecc4104 --- /dev/null +++ b/tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry_test.cc @@ -0,0 +1,190 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h" + +#include + +#include +#include +#include "absl/status/status.h" +#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h" +#include "xla/python/ifrt/future.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/framework/tensor_testutil.h" +#include "tensorflow/core/framework/types.pb.h" +#include "tsl/lib/core/status_test_util.h" +#include "tsl/platform/status_matchers.h" +#include "tsl/platform/statusor.h" + +using tsl::testing::IsOk; +using tsl::testing::StatusIs; + +namespace tensorflow { +namespace ifrt_serving { +namespace { + +TEST(IfrtRestoreTensorRegistryTest, RetrieveNonRegisteredTensorFails) { + IfrtRestoreTensorRegistry registry; + EXPECT_THAT(registry.GetRestoredTensor("input_tensor_1").Await(), + StatusIs(absl::StatusCode::kNotFound)); +} + +TEST(IfrtRestoreTensorRegistryTest, + RetrieveNonRegisteredTensorDTypeAndShapeFails) { + IfrtRestoreTensorRegistry registry; + EXPECT_THAT(registry.GetDtypeAndShape("input_tensor_1"), + StatusIs(absl::StatusCode::kNotFound)); +} + +TEST(IfrtRestoreTensorRegistryTest, SetNonExistedTensorAsUsedByHostFails) { + IfrtRestoreTensorRegistry registry; + EXPECT_THAT(registry.SetUsedByHost("input_tensor_1"), + StatusIs(absl::StatusCode::kNotFound)); +} + +TEST(IfrtRestoreTensorRegistryTest, RegisteredExistedTensorFails) { + auto input_tensor = + test::AsTensor({1, 2, 3, 4}, tensorflow::TensorShape({2, 2})); + auto promise = xla::ifrt::Future::CreatePromise(); + auto future = xla::ifrt::Future(promise); + + IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = { + .used_by_host = false, + .dtype_and_shape = + { + .dtype = DT_INT32, + .shape = tensorflow::TensorShape({2, 2}), + }, + .tensor_future = future}; + IfrtRestoreTensorRegistry registry; + EXPECT_THAT(registry.TryRegister("input_tensor_2", restored_tensor_info), + IsOk()); + promise.Set(input_tensor); + EXPECT_THAT(registry.TryRegister("input_tensor_2", restored_tensor_info), + StatusIs(absl::StatusCode::kAlreadyExists)); +} + +TEST(IfrtRestoreTensorRegistryTest, SetTensorAsUsedByHost) { + auto promise = xla::ifrt::Future::CreatePromise(); + auto future = xla::ifrt::Future(promise); + IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = { + .used_by_host = false, + .dtype_and_shape = + { + .dtype = DT_INT32, + .shape = tensorflow::TensorShape({2, 2}), + }, + .tensor_future = future}; + IfrtRestoreTensorRegistry registry; + EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info), + IsOk()); + EXPECT_THAT(registry.SetUsedByHost("input_tensor_1"), IsOk()); +} + +TEST(IfrtRestoreTensorRegistryTest, RegisteredTensorCanBeRetrieved) { + auto input_tensor = + test::AsTensor({1, 2, 3, 4}, tensorflow::TensorShape({2, 2})); + auto promise = xla::ifrt::Future::CreatePromise(); + auto future = xla::ifrt::Future(promise); + + IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = { + .used_by_host = false, + .dtype_and_shape = + { + .dtype = DT_INT32, + .shape = tensorflow::TensorShape({2, 2}), + }, + .tensor_future = future}; + IfrtRestoreTensorRegistry registry; + EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info), + IsOk()); + promise.Set(input_tensor); + TF_ASSERT_OK_AND_ASSIGN(tensorflow::Tensor retrieved, + registry.GetRestoredTensor("input_tensor_1").Await()); + 
test::ExpectEqual(retrieved, input_tensor); + TF_ASSERT_OK_AND_ASSIGN(DtypeAndShape dtype_and_shape, + registry.GetDtypeAndShape("input_tensor_1")); + EXPECT_TRUE( + dtype_and_shape.shape.IsSameSize(tensorflow::TensorShape({2, 2}))); + EXPECT_EQ(dtype_and_shape.dtype, DT_INT32); +} + +TEST(IfrtRestoreTensorRegistryTest, + RegisteredTensorDTypeAndShapeCanBeRetrieved) { + auto input_tensor = + test::AsTensor({1, 2, 3, 4}, tensorflow::TensorShape({2, 2})); + auto promise = xla::ifrt::Future::CreatePromise(); + auto future = xla::ifrt::Future(promise); + + IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = { + .used_by_host = false, + .dtype_and_shape = + { + .dtype = DT_INT32, + .shape = tensorflow::TensorShape({2, 2}), + }, + .tensor_future = future}; + IfrtRestoreTensorRegistry registry; + EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info), + IsOk()); + TF_ASSERT_OK_AND_ASSIGN(DtypeAndShape dtype_and_shape, + registry.GetDtypeAndShape("input_tensor_1")); + EXPECT_TRUE( + dtype_and_shape.shape.IsSameSize(tensorflow::TensorShape({2, 2}))); + EXPECT_EQ(dtype_and_shape.dtype, DT_INT32); +} + +TEST(IfrtRestoreTensorRegistryTest, FeezeTensorRegistry) { + auto input_tensor = + test::AsTensor({1, 2, 3, 4}, tensorflow::TensorShape({2, 2})); + auto promise1 = xla::ifrt::Future::CreatePromise(); + auto future1 = xla::ifrt::Future(promise1); + auto promise2 = xla::ifrt::Future::CreatePromise(); + auto future2 = xla::ifrt::Future(promise2); + + IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info1 = { + .used_by_host = false, + .dtype_and_shape = + { + .dtype = DT_INT32, + .shape = tensorflow::TensorShape({2, 2}), + }, + .tensor_future = future1}; + IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info2 = { + .used_by_host = true, + .dtype_and_shape = + { + .dtype = DT_INT32, + .shape = tensorflow::TensorShape({2, 2}), + }, + .tensor_future = future2}; + IfrtRestoreTensorRegistry registry; + TF_ASSERT_OK(registry.TryRegister("input_tensor_1", restored_tensor_info1)); + TF_ASSERT_OK(registry.TryRegister("input_tensor_2", restored_tensor_info2)); + promise1.Set(input_tensor); + promise2.Set(input_tensor); + registry.Freeze(); + // Tensor with `used_by_host` set to false will be freed after freeze. + EXPECT_THAT(registry.GetRestoredTensor("input_tensor_1").Await(), + StatusIs(absl::StatusCode::kUnavailable)); + // Tensor with `used_by_host` set to true will be kept after freeze. + TF_ASSERT_OK_AND_ASSIGN(tensorflow::Tensor retrieved, + registry.GetRestoredTensor("input_tensor_2").Await()); + test::ExpectEqual(retrieved, input_tensor); +} +} // namespace +} // namespace ifrt_serving +} // namespace tensorflow diff --git a/tensorflow/core/tfrt/ifrt/ifrt_serving_executable.cc b/tensorflow/core/tfrt/ifrt/ifrt_serving_executable.cc index c85c9a9d223b08..822df677a97240 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_serving_executable.cc +++ b/tensorflow/core/tfrt/ifrt/ifrt_serving_executable.cc @@ -51,6 +51,7 @@ limitations under the License. 
#include "xla/python/ifrt/device.h" #include "xla/python/ifrt/executable.h" #include "xla/python/ifrt/future.h" +#include "xla/python/ifrt/hlo/hlo_program.h" #include "xla/python/ifrt/host_callback.h" #include "xla/python/ifrt/shape.h" #include "xla/python/ifrt/sharding.h" @@ -164,7 +165,7 @@ IfrtServingExecutable::Create( IfrtLoadedVariableRegistry* ifrt_loaded_variable_registry, const IfrtRestoreTensorRegistry* ifrt_restore, tfrt::ConcurrentWorkQueue* checkpoint_loader_queue, - tensorflow::StaticDeviceMgr* device_mgr, + tensorflow::DeviceMgr* device_mgr, tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn, IfrtServingCoreSelector* ifrt_serving_core_selector) { TF_ASSIGN_OR_RETURN( @@ -236,7 +237,7 @@ GroupHostCallbackByKey(const Tf2HloResult& tf2hlo_result) { // TODO: shape propagation in module absl::StatusOr BuildHostCallback( absl::string_view key, const HostCallbackBuilderInfo& builder_info, - mlir::ModuleOp module, tensorflow::StaticDeviceMgr* device_mgr, + mlir::ModuleOp module, tensorflow::DeviceMgr* device_mgr, std::vector>& tf_host_callbacks) { VLOG(2) << "BuildHostCallback for key: " << key; @@ -309,7 +310,7 @@ absl::StatusOr BuildHostCallback( absl::StatusOr> BuildHostCallbacks( const Tf2HloResult& tf2hlo_result, mlir::ModuleOp module, - tensorflow::StaticDeviceMgr* device_mgr, + tensorflow::DeviceMgr* device_mgr, std::vector>& tf_host_callbacks) { TF_ASSIGN_OR_RETURN(auto host_callback_maps, GroupHostCallbackByKey(tf2hlo_result)); @@ -386,7 +387,7 @@ IfrtServingExecutable::CreateExecutableSynchronously( TF_ASSIGN_OR_RETURN( std::unique_ptr ifrt_executable, ifrt_client_->GetDefaultCompiler()->Compile( - std::make_unique( + std::make_unique( tf2hlo_result.mlir_hlo_module.get()), std::make_unique( xla_compile_options, loaded_host_callbacks))); diff --git a/tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h b/tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h index 5a336689073cd6..f7a67e680a07e0 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h +++ b/tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h @@ -25,14 +25,12 @@ limitations under the License. 
#include "absl/base/thread_annotations.h" #include "absl/container/flat_hash_map.h" -#include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "absl/types/span.h" #include "mlir/IR/BuiltinOps.h" // from @llvm-project -#include "mlir/IR/MLIRContext.h" // from @llvm-project #include "mlir/IR/OwningOpRef.h" // from @llvm-project #include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" @@ -70,7 +68,7 @@ class IfrtServingExecutable { IfrtLoadedVariableRegistry* ifrt_loaded_variable_registry, const IfrtRestoreTensorRegistry* ifrt_restore, tfrt::ConcurrentWorkQueue* checkpoint_loader_queue, - tensorflow::StaticDeviceMgr* device_mgr, + tensorflow::DeviceMgr* device_mgr, tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn, IfrtServingCoreSelector* ifrt_serving_core_selector); @@ -140,7 +138,7 @@ class IfrtServingExecutable { IfrtLoadedVariableRegistry* ifrt_loaded_variable_registry, const IfrtRestoreTensorRegistry* ifrt_restore_tensor_registry, tfrt::ConcurrentWorkQueue* checkpoint_loader_queue, - tensorflow::StaticDeviceMgr* device_mgr, + tensorflow::DeviceMgr* device_mgr, tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn, IfrtServingCoreSelector* ifrt_serving_core_selector, tensorflow::tpu::TPUCompileMetadataProto original_compile_metadata) @@ -176,7 +174,7 @@ class IfrtServingExecutable { IfrtLoadedVariableRegistry& ifrt_loaded_variable_registry_; const IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry_; tfrt::ConcurrentWorkQueue* checkpoint_loader_queue_; - tensorflow::StaticDeviceMgr* device_mgr_; // Not owned. For host callback. + tensorflow::DeviceMgr* device_mgr_; // Not owned. For host callback. tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn_; IfrtServingCoreSelector* ifrt_serving_core_selector_; diff --git a/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test.cc b/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test.cc index 5de01ff67139ae..aca38747cff03d 100644 --- a/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test.cc +++ b/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test.cc @@ -29,44 +29,36 @@ limitations under the License. 
#include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" -#include "mlir/IR/BuiltinOps.h" // from @llvm-project -#include "mlir/IR/DialectRegistry.h" // from @llvm-project -#include "mlir/IR/MLIRContext.h" // from @llvm-project -#include "mlir/IR/OwningOpRef.h" // from @llvm-project -#include "mlir/InitAllDialects.h" // from @llvm-project -#include "mlir/Parser/Parser.h" // from @llvm-project -#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h" -#include "tensorflow/compiler/tf2xla/xla_helpers.h" -#include "xla/hlo/ir/hlo_sharding.h" #include "xla/python/ifrt/array.h" #include "xla/python/ifrt/client.h" #include "xla/python/ifrt/future.h" #include "xla/python/ifrt/test_util.h" -#include "xla/tsl/concurrency/ref_count.h" -#include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_matcher.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" -#include "tensorflow/core/platform/resource_loader.h" #include "tensorflow/core/platform/test.h" -#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h" #include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h" #include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h" -#include "tensorflow/core/tfrt/ifrt/sharding_utils.h" -#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h" +#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h" #include "tsl/framework/serving_device_selector.h" #include "tsl/framework/test_util/mock_serving_device_selector.h" #include "tsl/platform/env.h" #include "tsl/platform/statusor.h" #include "tsl/platform/threadpool.h" #include "tsl/platform/tstring.h" -#include "tfrt/host_context/concurrent_work_queue.h" // from @tf_runtime namespace tensorflow { namespace ifrt_serving { namespace { +using tensorflow::ifrt_serving::test_utils::GetMlirModulePath; +using ::tensorflow::test::AsTensor; +using ::tensorflow::test::TensorEq; +using ::testing::ElementsAre; +using ::testing::Return; +using ::tsl::testing::StatusIs; + struct VariableInputTestParam { std::vector in_tensors; std::vector @@ -76,12 +68,6 @@ struct VariableInputTestParam { }; using VariableInputTest = ::testing::TestWithParam; -using ::tensorflow::test::AsTensor; -using ::tensorflow::test::TensorEq; -using ::testing::ElementsAre; -using ::testing::Return; -using ::tsl::testing::StatusIs; - const tsl::thread::ThreadPool& GetThreadPool() { constexpr int kMaxParallelism = 16; static auto* const thread_pool = @@ -89,7 +75,6 @@ const tsl::thread::ThreadPool& GetThreadPool() { "IfrtSharding", kMaxParallelism); return *thread_pool; } - class IfrtServingExecutableTest : public ::testing::Test { protected: explicit IfrtServingExecutableTest() { @@ -106,48 +91,13 @@ class IfrtServingExecutableTest : public ::testing::Test { }; TEST_F(IfrtServingExecutableTest, Basic) { - // Create test input module - constexpr absl::string_view kDataDirectory = - "tensorflow/core/tfrt/ifrt/testdata"; - std::string mlir_module_path = tensorflow::GetDataDependencyFilepath( - absl::StrCat(kDataDirectory, "/executable.mlir")); - - mlir::DialectRegistry registry; - mlir::registerAllDialects(registry); - mlir::RegisterAllTensorFlowDialects(registry); - - mlir::MLIRContext context(registry); - - mlir::OwningOpRef mlir_module = - mlir::parseSourceFile(mlir_module_path, &context); - - ASSERT_TRUE(mlir_module); - 
+ test_utils::IfrtServingExecutableTestHelper helper(&selector_); int64_t program_id = 123456; EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))) .Times(1) .WillOnce(Return(tsl::DeviceReservation(0, /*selector=*/nullptr))); - - // Create contexts required for the compiler execution. - TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr client, - xla::ifrt::test_util::GetClient()); - - IfrtLoadedVariableRegistry ifrt_loaded_variable_registry; - IfrtRestoreTensorRegistry ifrt_restore_tensor_registry; - std::unique_ptr work_queue = - tfrt::CreateMultiThreadedWorkQueue( - /*num_threads=*/4, /*num_blocking_threads=*/4); - TF_ASSERT_OK_AND_ASSIGN( - std::unique_ptr device_mgr, - CreateTfStaticDeviceMgr()); - - TF_ASSERT_OK_AND_ASSIGN( - auto executable, - IfrtServingExecutable ::Create( - program_id, "test", "main", std::move(mlir_module), client, - &GetThreadPool(), &ifrt_loaded_variable_registry, - &ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(), - tensorflow::IdentityShapeRepresentationFn(), core_selector_.get())); + auto executable = + helper.MakeExecutable(program_id, GetMlirModulePath("executable.mlir")); auto x = AsTensor({1, 2, 3}, tensorflow::TensorShape({1, 3})); auto y = AsTensor({1, 2, 3}, tensorflow::TensorShape({3, 1})); @@ -163,49 +113,14 @@ TEST_F(IfrtServingExecutableTest, Basic) { } TEST_F(IfrtServingExecutableTest, MultipleShapes) { - // Create test input module - constexpr absl::string_view kDataDirectory = - "tensorflow/core/tfrt/ifrt/testdata"; - std::string mlir_module_path = tensorflow::GetDataDependencyFilepath( - absl::StrCat(kDataDirectory, "/executable.mlir")); - - mlir::DialectRegistry registry; - mlir::registerAllDialects(registry); - mlir::RegisterAllTensorFlowDialects(registry); - - mlir::MLIRContext context(registry); - - mlir::OwningOpRef mlir_module = - mlir::parseSourceFile(mlir_module_path, &context); - - ASSERT_TRUE(mlir_module); - + test_utils::IfrtServingExecutableTestHelper helper(&selector_); int64_t program_id = 123456; EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))) .Times(6) .WillRepeatedly( [](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); }); - - // Create contexts required for the compiler execution. 
- TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr client, - xla::ifrt::test_util::GetClient()); - - IfrtLoadedVariableRegistry ifrt_loaded_variable_registry; - IfrtRestoreTensorRegistry ifrt_restore_tensor_registry; - std::unique_ptr work_queue = - tfrt::CreateMultiThreadedWorkQueue( - /*num_threads=*/4, /*num_blocking_threads=*/4); - TF_ASSERT_OK_AND_ASSIGN( - std::unique_ptr device_mgr, - CreateTfStaticDeviceMgr()); - - TF_ASSERT_OK_AND_ASSIGN( - auto executable, - IfrtServingExecutable ::Create( - program_id, "test", "main", std::move(mlir_module), client, - &GetThreadPool(), &ifrt_loaded_variable_registry, - &ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(), - tensorflow::IdentityShapeRepresentationFn(), core_selector_.get())); + auto executable = + helper.MakeExecutable(program_id, GetMlirModulePath("executable.mlir")); auto x1 = AsTensor({1, 2, 3}, tensorflow::TensorShape({1, 3})); auto y1 = AsTensor({1, 2, 3}, tensorflow::TensorShape({3, 1})); @@ -236,49 +151,14 @@ TEST_F(IfrtServingExecutableTest, MultipleShapes) { } TEST_F(IfrtServingExecutableTest, ReturnFailOnUncompiledShapeAfterFrozen) { - // Create test input module - constexpr absl::string_view kDataDirectory = - "tensorflow/core/tfrt/ifrt/testdata"; - std::string mlir_module_path = tensorflow::GetDataDependencyFilepath( - absl::StrCat(kDataDirectory, "/executable.mlir")); - - mlir::DialectRegistry registry; - mlir::registerAllDialects(registry); - mlir::RegisterAllTensorFlowDialects(registry); - - mlir::MLIRContext context(registry); - - mlir::OwningOpRef mlir_module = - mlir::parseSourceFile(mlir_module_path, &context); - - ASSERT_TRUE(mlir_module); - + test_utils::IfrtServingExecutableTestHelper helper(&selector_); int64_t program_id = 123456; EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))) .Times(3) .WillRepeatedly( [](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); }); - - // Create contexts required for the compiler execution. 
- TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr client, - xla::ifrt::test_util::GetClient()); - - IfrtLoadedVariableRegistry ifrt_loaded_variable_registry; - IfrtRestoreTensorRegistry ifrt_restore_tensor_registry; - std::unique_ptr work_queue = - tfrt::CreateMultiThreadedWorkQueue( - /*num_threads=*/4, /*num_blocking_threads=*/4); - TF_ASSERT_OK_AND_ASSIGN( - std::unique_ptr device_mgr, - CreateTfStaticDeviceMgr()); - - TF_ASSERT_OK_AND_ASSIGN( - auto executable, - IfrtServingExecutable ::Create( - program_id, "test", "main", std::move(mlir_module), client, - &GetThreadPool(), &ifrt_loaded_variable_registry, - &ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(), - tensorflow::IdentityShapeRepresentationFn(), core_selector_.get())); + auto executable = + helper.MakeExecutable(program_id, GetMlirModulePath("executable.mlir")); auto x1 = AsTensor({1, 2, 3}, tensorflow::TensorShape({1, 3})); auto y1 = AsTensor({1, 2, 3}, tensorflow::TensorShape({3, 1})); @@ -310,47 +190,11 @@ TEST_F(IfrtServingExecutableTest, ReturnFailOnUncompiledShapeAfterFrozen) { } TEST_F(IfrtServingExecutableTest, Spmd) { - // Create test input module - constexpr absl::string_view kDataDirectory = - "tensorflow/core/tfrt/ifrt/testdata"; - std::string mlir_module_path = tensorflow::GetDataDependencyFilepath( - absl::StrCat(kDataDirectory, "/spmd_executable.mlir")); - - mlir::DialectRegistry registry; - mlir::registerAllDialects(registry); - mlir::RegisterAllTensorFlowDialects(registry); - - mlir::MLIRContext context(registry); - - mlir::OwningOpRef mlir_module = - mlir::parseSourceFile(mlir_module_path, &context); - - ASSERT_TRUE(mlir_module); - + test_utils::IfrtServingExecutableTestHelper helper(&selector_); int64_t program_id = 111111; - EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))).Times(0); - - // Create contexts required for the compiler execution. 
- TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr client, - xla::ifrt::test_util::GetClient()); - - IfrtLoadedVariableRegistry ifrt_loaded_variable_registry; - IfrtRestoreTensorRegistry ifrt_restore_tensor_registry; - std::unique_ptr work_queue = - tfrt::CreateMultiThreadedWorkQueue( - /*num_threads=*/4, /*num_blocking_threads=*/4); - TF_ASSERT_OK_AND_ASSIGN( - std::unique_ptr device_mgr, - CreateTfStaticDeviceMgr()); - - TF_ASSERT_OK_AND_ASSIGN( - auto executable, - IfrtServingExecutable ::Create( - program_id, "test", "main", std::move(mlir_module), client, - &GetThreadPool(), &ifrt_loaded_variable_registry, - &ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(), - tensorflow::IdentityShapeRepresentationFn(), core_selector_.get())); + auto executable = helper.MakeExecutable( + program_id, GetMlirModulePath("spmd_executable.mlir")); auto x = AsTensor({1, 2, 3, 4, 5, 6, 7, 8}, tensorflow::TensorShape({4, 2})); @@ -371,47 +215,11 @@ TEST_F(IfrtServingExecutableTest, Spmd) { } TEST_F(IfrtServingExecutableTest, SpmdTwoReturns) { - // Create test input module - constexpr absl::string_view kDataDirectory = - "tensorflow/core/tfrt/ifrt/testdata"; - std::string mlir_module_path = tensorflow::GetDataDependencyFilepath( - absl::StrCat(kDataDirectory, "/spmd_executable_two_returns.mlir")); - - mlir::DialectRegistry registry; - mlir::registerAllDialects(registry); - mlir::RegisterAllTensorFlowDialects(registry); - - mlir::MLIRContext context(registry); - - mlir::OwningOpRef mlir_module = - mlir::parseSourceFile(mlir_module_path, &context); - - ASSERT_TRUE(mlir_module); - + test_utils::IfrtServingExecutableTestHelper helper(&selector_); int64_t program_id = 111111; - EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))).Times(0); - - // Create contexts required for the compiler execution. 
- TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr client, - xla::ifrt::test_util::GetClient()); - - IfrtLoadedVariableRegistry ifrt_loaded_variable_registry; - IfrtRestoreTensorRegistry ifrt_restore_tensor_registry; - std::unique_ptr work_queue = - tfrt::CreateMultiThreadedWorkQueue( - /*num_threads=*/4, /*num_blocking_threads=*/4); - TF_ASSERT_OK_AND_ASSIGN( - std::unique_ptr device_mgr, - CreateTfStaticDeviceMgr()); - - TF_ASSERT_OK_AND_ASSIGN( - auto executable, - IfrtServingExecutable ::Create( - program_id, "test", "main", std::move(mlir_module), client, - &GetThreadPool(), &ifrt_loaded_variable_registry, - &ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(), - tensorflow::IdentityShapeRepresentationFn(), core_selector_.get())); + auto executable = helper.MakeExecutable( + program_id, GetMlirModulePath("spmd_executable_two_returns.mlir")); auto x = AsTensor({1, 2, 3, 4, 5, 6, 7, 8}, tensorflow::TensorShape({4, 2})); @@ -436,50 +244,14 @@ TEST_F(IfrtServingExecutableTest, SpmdTwoReturns) { } TEST_F(IfrtServingExecutableTest, NoReturn) { - // Create test input module - constexpr absl::string_view kDataDirectory = - "tensorflow/core/tfrt/ifrt/testdata"; - std::string mlir_module_path = tensorflow::GetDataDependencyFilepath( - absl::StrCat(kDataDirectory, "/executable_no_return.mlir")); - - mlir::DialectRegistry registry; - mlir::registerAllDialects(registry); - mlir::RegisterAllTensorFlowDialects(registry); - - mlir::MLIRContext context(registry); - - mlir::OwningOpRef mlir_module = - mlir::parseSourceFile(mlir_module_path, &context); - - ASSERT_TRUE(mlir_module); - + test_utils::IfrtServingExecutableTestHelper helper(&selector_); int64_t program_id = 111111; - EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))) .Times(1) .WillRepeatedly( [](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); }); - - // Create contexts required for the compiler execution. 
- TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr client, - xla::ifrt::test_util::GetClient()); - - IfrtLoadedVariableRegistry ifrt_loaded_variable_registry; - IfrtRestoreTensorRegistry ifrt_restore_tensor_registry; - std::unique_ptr work_queue = - tfrt::CreateMultiThreadedWorkQueue( - /*num_threads=*/4, /*num_blocking_threads=*/4); - TF_ASSERT_OK_AND_ASSIGN( - std::unique_ptr device_mgr, - CreateTfStaticDeviceMgr()); - - TF_ASSERT_OK_AND_ASSIGN( - auto executable, - IfrtServingExecutable ::Create( - program_id, "test", "main", std::move(mlir_module), client, - &GetThreadPool(), &ifrt_loaded_variable_registry, - &ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(), - tensorflow::IdentityShapeRepresentationFn(), core_selector_.get())); + auto executable = helper.MakeExecutable( + program_id, GetMlirModulePath("executable_no_return.mlir")); auto x = AsTensor({1, 2, 3}, tensorflow::TensorShape({1, 3})); auto y = AsTensor({1, 2, 3}, tensorflow::TensorShape({3, 1})); @@ -492,52 +264,17 @@ TEST_F(IfrtServingExecutableTest, NoReturn) { } TEST_P(VariableInputTest, InterleaveVariable) { - // Create test input module - constexpr absl::string_view kDataDirectory = - "tensorflow/core/tfrt/ifrt/testdata"; - std::string mlir_module_path = tensorflow::GetDataDependencyFilepath( - absl::StrCat(kDataDirectory, "/executable_long_inputs.mlir")); - - mlir::DialectRegistry registry; - mlir::registerAllDialects(registry); - mlir::RegisterAllTensorFlowDialects(registry); - - mlir::MLIRContext context(registry); - - mlir::OwningOpRef mlir_module = - mlir::parseSourceFile(mlir_module_path, &context); - - ASSERT_TRUE(mlir_module); - tsl::test_util::MockServingDeviceSelector device_selector; - IfrtServingCoreSelector core_selector(&device_selector); + test_utils::IfrtServingExecutableTestHelper helper(&device_selector); int64_t program_id = 111111; - EXPECT_CALL(device_selector, ReserveDevice(absl::StrCat(program_id))) .Times(1) .WillRepeatedly( [](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); }); - - // Create contexts required for the compiler execution. 
- TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr client, - xla::ifrt::test_util::GetClient()); - - IfrtLoadedVariableRegistry ifrt_loaded_variable_registry; - IfrtRestoreTensorRegistry ifrt_restore_tensor_registry; - std::unique_ptr work_queue = - tfrt::CreateMultiThreadedWorkQueue( - /*num_threads=*/4, /*num_blocking_threads=*/4); - TF_ASSERT_OK_AND_ASSIGN( - std::unique_ptr device_mgr, - CreateTfStaticDeviceMgr()); - - TF_ASSERT_OK_AND_ASSIGN( - auto executable, - IfrtServingExecutable ::Create( - program_id, "test", "main", std::move(mlir_module), client, - &GetThreadPool(), &ifrt_loaded_variable_registry, - &ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(), - tensorflow::IdentityShapeRepresentationFn(), &core_selector)); + auto executable = helper.MakeExecutable( + program_id, GetMlirModulePath("executable_long_inputs.mlir")); + IfrtRestoreTensorRegistry* ifrt_restore_tensor_registry = + helper.ifrt_restore_tensor_registry(); std::vector inputs; std::vector loaded_variable_indices; @@ -552,8 +289,8 @@ TEST_P(VariableInputTest, InterleaveVariable) { .shape = GetParam().in_tensors[i].shape()}, .tensor_future = input_tensor_future}; std::string variable_name = absl::StrCat("variable_", i); - ASSERT_OK(ifrt_restore_tensor_registry.TryRegister(variable_name, - restore_tensor_info)); + ASSERT_OK(ifrt_restore_tensor_registry->TryRegister(variable_name, + restore_tensor_info)); loaded_variable_indices.push_back(i); input_tensor_promise.Set(GetParam().in_tensors[i]); // Use string tensor containing the key (name) in place of variable diff --git a/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.cc b/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.cc new file mode 100644 index 00000000000000..49ca7b3a9bbd98 --- /dev/null +++ b/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.cc @@ -0,0 +1,95 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h" + +#include +#include +#include +#include + +#include "absl/status/statusor.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "mlir/IR/BuiltinOps.h" // from @llvm-project +#include "mlir/IR/MLIRContext.h" // from @llvm-project +#include "mlir/IR/OwningOpRef.h" // from @llvm-project +#include "mlir/InitAllDialects.h" // from @llvm-project +#include "mlir/Parser/Parser.h" // from @llvm-project +#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h" +#include "tensorflow/compiler/tf2xla/xla_helpers.h" +#include "xla/python/ifrt/test_util.h" +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/platform/resource_loader.h" +#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h" +#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h" +#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h" +#include "tsl/framework/test_util/mock_serving_device_selector.h" +#include "tsl/platform/env.h" +#include "tsl/platform/status.h" +#include "tsl/platform/threadpool.h" +#include "tfrt/host_context/concurrent_work_queue.h" // from @tf_runtime +namespace tensorflow { +namespace ifrt_serving { +namespace test_utils { + +inline constexpr absl::string_view kMlirModulePath = + "tensorflow/core/tfrt/ifrt/testdata/"; + +std::string GetMlirModulePath(absl::string_view module_name) { + return tensorflow::GetDataDependencyFilepath( + absl::StrCat(kMlirModulePath, module_name)); +} + +IfrtServingExecutableTestHelper::IfrtServingExecutableTestHelper( + tsl::test_util::MockServingDeviceSelector* device_selector) + : device_selector_(device_selector) { + core_selector_ = std::make_unique(device_selector_); + auto client_or = xla::ifrt::test_util::GetClient(); + TF_CHECK_OK(client_or.status()); + client_ = std::move(client_or.value()); + + thread_pool_ = std::make_unique( + tsl::Env::Default(), tsl::ThreadOptions(), "IfrtSharding", + kThreadPoolNumThreads); + work_queue_ = tfrt::CreateMultiThreadedWorkQueue( + /*num_threads=*/4, /*num_blocking_threads=*/4); + + auto device_mgr_or = ifrt_serving::CreateTfStaticDeviceMgr(); + TF_CHECK_OK(device_mgr_or.status()); + device_mgr_ = std::move(device_mgr_or.value()); + + mlir::registerAllDialects(registry_); + mlir::RegisterAllTensorFlowDialects(registry_); + context_ = std::make_unique(registry_); +} + +std::unique_ptr +IfrtServingExecutableTestHelper::MakeExecutable(int64_t program_id, + std::string mlir_module_path) { + auto mlir_module = + mlir::parseSourceFile(mlir_module_path, context_.get()); + auto executable_or = IfrtServingExecutable::Create( + program_id, "test", "main", std::move(mlir_module), client_, + thread_pool_.get(), &ifrt_loaded_variable_registry_, + &ifrt_restore_tensor_registry_, work_queue_.get(), device_mgr_.get(), + tensorflow::IdentityShapeRepresentationFn(), core_selector_.get()); + TF_CHECK_OK(executable_or.status()); + return std::move(executable_or.value()); +} + +} // namespace test_utils +} // namespace ifrt_serving +} // namespace tensorflow diff --git a/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h b/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h new file mode 100644 index 00000000000000..2fc669c08a5062 --- /dev/null +++ b/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h @@ -0,0 +1,82 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_CORE_TFRT_IFRT_IFRT_SERVING_EXECUTABLE_TEST_UTIL_H_ +#define TENSORFLOW_CORE_TFRT_IFRT_IFRT_SERVING_EXECUTABLE_TEST_UTIL_H_ + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "mlir/IR/DialectRegistry.h" // from @llvm-project +#include "mlir/IR/MLIRContext.h" // from @llvm-project +#include "xla/python/ifrt/array.h" +#include "xla/python/ifrt/client.h" +#include "xla/python/ifrt/test_util.h" +#include "tensorflow/core/common_runtime/device_mgr.h" +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h" +#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h" +#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h" +#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h" +#include "tsl/framework/test_util/mock_serving_device_selector.h" +#include "tsl/platform/threadpool.h" +#include "tfrt/host_context/concurrent_work_queue.h" // from @tf_runtime + +namespace tensorflow { +namespace ifrt_serving { +namespace test_utils { + +// A test helper class to create and IfrtServingExecutable. +class IfrtServingExecutableTestHelper { + public: + explicit IfrtServingExecutableTestHelper( + tsl::test_util::MockServingDeviceSelector* device_selector); + + // Creates an IfrtServingExecutable with the given program id. + // Note the instance of this class must outlive the returned + // IfrtServingExecutable. + std::unique_ptr MakeExecutable( + int64_t program_id, std::string mlir_module_path); + + IfrtRestoreTensorRegistry* ifrt_restore_tensor_registry() { + return &ifrt_restore_tensor_registry_; + } + + private: + static constexpr int kThreadPoolNumThreads = 16; + + tsl::test_util::MockServingDeviceSelector* device_selector_; // Not owned. + std::unique_ptr core_selector_; + std::shared_ptr client_; + std::unique_ptr thread_pool_; + IfrtLoadedVariableRegistry ifrt_loaded_variable_registry_; + IfrtRestoreTensorRegistry ifrt_restore_tensor_registry_; + std::unique_ptr work_queue_; + std::unique_ptr device_mgr_; + + mlir::DialectRegistry registry_; + std::unique_ptr context_; +}; + +// Returns the path to the MLIR module for the given module name. +std::string GetMlirModulePath(absl::string_view module_name); + +} // namespace test_utils +} // namespace ifrt_serving +} // namespace tensorflow + +#endif // TENSORFLOW_CORE_TFRT_IFRT_IFRT_SERVING_EXECUTABLE_TEST_UTIL_H_ diff --git a/tensorflow/core/tfrt/ifrt/sharding_utils.cc b/tensorflow/core/tfrt/ifrt/sharding_utils.cc index 03cc5f953edb38..73ddec940649e2 100644 --- a/tensorflow/core/tfrt/ifrt/sharding_utils.cc +++ b/tensorflow/core/tfrt/ifrt/sharding_utils.cc @@ -52,7 +52,6 @@ limitations under the License. 
#include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/status.h" -#include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/tfrt/ifrt/ifrt_tensor_utils.h" #include "tensorflow/core/tpu/kernels/sharding_utils.h" #include "tsl/platform/errors.h" @@ -174,8 +173,8 @@ SplitAndCreateArraysFromHostBuffer( kImmutableUntilTransferCompletes, [tensor, slice_idx]() { // Keep tensor alive - LOG(INFO) << "Done with host buffer for slice " << slice_idx - << " at " << tensor.data(); + VLOG(2) << "Done with host buffer for slice " << slice_idx + << " at " << tensor.data(); })); arrays.push_back(std::move(array)); device_iter++; diff --git a/tensorflow/core/tfrt/ifrt/sharding_utils.h b/tensorflow/core/tfrt/ifrt/sharding_utils.h index 96c99a061d2855..12f04bdc990692 100644 --- a/tensorflow/core/tfrt/ifrt/sharding_utils.h +++ b/tensorflow/core/tfrt/ifrt/sharding_utils.h @@ -17,15 +17,13 @@ limitations under the License. #define TENSORFLOW_CORE_TFRT_IFRT_SHARDING_UTILS_H_ #include "absl/status/statusor.h" -#include "xla/executable_run_options.h" +#include "absl/types/span.h" #include "xla/hlo/ir/hlo_sharding.h" #include "xla/python/ifrt/array.h" #include "xla/python/ifrt/client.h" #include "xla/python/ifrt/device.h" -#include "xla/python/ifrt/future.h" #include "xla/tsl/concurrency/ref_count.h" #include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/platform/statusor.h" #include "tsl/platform/threadpool.h" namespace tensorflow { diff --git a/tensorflow/core/tfrt/ifrt/sharding_utils_test.cc b/tensorflow/core/tfrt/ifrt/sharding_utils_test.cc index c0fc64851868e9..ded49b2c0f14c0 100644 --- a/tensorflow/core/tfrt/ifrt/sharding_utils_test.cc +++ b/tensorflow/core/tfrt/ifrt/sharding_utils_test.cc @@ -28,6 +28,7 @@ limitations under the License. #include "xla/python/ifrt/array.h" #include "xla/python/ifrt/client.h" #include "xla/python/ifrt/device.h" +#include "xla/python/ifrt/memory.h" #include "xla/python/ifrt/shape.h" #include "xla/python/ifrt/sharding.h" #include "xla/python/ifrt/test_util.h" diff --git a/tensorflow/core/tfrt/ifrt/testdata/BUILD b/tensorflow/core/tfrt/ifrt/testdata/BUILD index 948ce54ab983a7..0fcbd0de55f217 100644 --- a/tensorflow/core/tfrt/ifrt/testdata/BUILD +++ b/tensorflow/core/tfrt/ifrt/testdata/BUILD @@ -1,6 +1,9 @@ package( # copybara:uncomment default_applicable_licenses = ["//tensorflow:license"], - default_visibility = ["//tensorflow/core/tfrt/ifrt:__subpackages__"], + default_visibility = [ + "//tensorflow/core/tfrt/ifrt:__subpackages__", + "//tensorflow/core/tfrt/kernels:__subpackages__", + ], licenses = ["notice"], ) diff --git a/tensorflow/core/tfrt/ifrt/tf_host_callback.cc b/tensorflow/core/tfrt/ifrt/tf_host_callback.cc index 084d1ea1a3ec7a..5c5a48f4fc52b4 100644 --- a/tensorflow/core/tfrt/ifrt/tf_host_callback.cc +++ b/tensorflow/core/tfrt/ifrt/tf_host_callback.cc @@ -127,7 +127,7 @@ absl::StatusOr> TfHostCallback::Create( absl::string_view entry_function_name, absl::Span operand_type_and_shapes, absl::Span result_type_and_shapes, - tensorflow::StaticDeviceMgr* device_mgr) { + tensorflow::DeviceMgr* device_mgr) { tensorflow::SessionOptions options; // Explicitly disable non-CPU devices to avoid triggering TPU device // initialization inside TF. 
diff --git a/tensorflow/core/tfrt/ifrt/tf_host_callback.h b/tensorflow/core/tfrt/ifrt/tf_host_callback.h index ddb00050e8df58..a78b0e5d0aecea 100644 --- a/tensorflow/core/tfrt/ifrt/tf_host_callback.h +++ b/tensorflow/core/tfrt/ifrt/tf_host_callback.h @@ -45,7 +45,7 @@ class TfHostCallback { absl::string_view entry_function_name, absl::Span operand_type_and_shapes, absl::Span result_type_and_shapes, - tensorflow::StaticDeviceMgr* device_mgr); + tensorflow::DeviceMgr* device_mgr); // The host callback function takes two pointer arrays, each element of which // points to allocated host buffer in host layout according to corresponding diff --git a/tensorflow/core/tfrt/kernels/BUILD b/tensorflow/core/tfrt/kernels/BUILD index fa342c7a6bcb17..a6c6ba58c6659a 100644 --- a/tensorflow/core/tfrt/kernels/BUILD +++ b/tensorflow/core/tfrt/kernels/BUILD @@ -34,6 +34,46 @@ cc_library( alwayslink = 1, ) +tf_cc_test( + name = "ifrt_program_ops_test", + srcs = ["ifrt_program_ops_test.cc"], + data = [ + "//tensorflow/core/tfrt/ifrt/testdata", + ], + tags = ["no_oss"], + deps = [ + ":ifrt_program_ops", + "//tensorflow/compiler/tf2xla:xla_helpers", + "//tensorflow/core:core_cpu_base", + "//tensorflow/core:framework", + "//tensorflow/core:test", + "//tensorflow/core:test_main", + "//tensorflow/core:testlib", + "//tensorflow/core/framework:tensor_matcher", + "//tensorflow/core/framework:types_proto_cc", + "//tensorflow/core/lib/gtl:cleanup", + "//tensorflow/core/platform:status", + "//tensorflow/core/tfrt/ifrt:ifrt_executable_registry", + "//tensorflow/core/tfrt/ifrt:ifrt_serving_executable_test_util", + "//tensorflow/core/tfrt/ops:ifrt_program_ops_op_lib", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/status:statusor", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:string_view", + "@com_google_absl//absl/types:span", + "@com_google_googletest//:gtest_main", + "@local_tsl//tsl/framework:serving_device_selector", + "@local_tsl//tsl/framework/test_util:mock_serving_device_selector", + "@local_tsl//tsl/lib/core:status_test_util", + "@local_tsl//tsl/platform:status", + "@local_tsl//tsl/platform:statusor", + "@local_xla//xla/pjrt/cpu:cpu_client", + "@local_xla//xla/python/ifrt", + "@local_xla//xla/python/ifrt:test_util", + "@local_xla//xla/python/pjrt_ifrt", + ], +) + cc_library( name = "stream_ops", srcs = ["stream_ops.cc"], diff --git a/tensorflow/core/tfrt/kernels/ifrt_program_ops_test.cc b/tensorflow/core/tfrt/kernels/ifrt_program_ops_test.cc new file mode 100644 index 00000000000000..292bae8da04685 --- /dev/null +++ b/tensorflow/core/tfrt/kernels/ifrt_program_ops_test.cc @@ -0,0 +1,121 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include +#include +#include +#include + +#include +#include +#include "absl/log/check.h" +#include "absl/status/statusor.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "absl/types/span.h" +#include "xla/pjrt/cpu/cpu_client.h" +#include "xla/python/ifrt/array.h" +#include "xla/python/ifrt/client.h" +#include "xla/python/ifrt/test_util.h" +#include "xla/python/pjrt_ifrt/pjrt_client.h" +#include "tensorflow/core/framework/fake_input.h" +#include "tensorflow/core/framework/node_def_builder.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_matcher.h" +#include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/framework/tensor_testutil.h" +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/kernels/ops_testutil.h" +#include "tensorflow/core/lib/gtl/cleanup.h" +#include "tensorflow/core/platform/status.h" +#include "tensorflow/core/platform/test.h" +#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h" +#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h" +#include "tsl/framework/serving_device_selector.h" +#include "tsl/framework/test_util/mock_serving_device_selector.h" +#include "tsl/lib/core/status_test_util.h" +#include "tsl/platform/status.h" +#include "tsl/platform/statusor.h" + +namespace tensorflow { +namespace tfrt_stub { +namespace { + +using tensorflow::ifrt_serving::ServingExecutableRegistry; +using tensorflow::ifrt_serving::test_utils::GetMlirModulePath; +using tensorflow::ifrt_serving::test_utils::IfrtServingExecutableTestHelper; +using tensorflow::test::AsTensor; +using tensorflow::test::TensorEq; +using ::testing::Return; + +const bool kUnused = + (xla::ifrt::test_util::RegisterClientFactory( + []() -> absl::StatusOr> { + xla::CpuClientOptions options; + options.cpu_device_count = 4; + TF_ASSIGN_OR_RETURN(auto pjrt_client, + xla::GetTfrtCpuClient(options)); + return std::shared_ptr( + xla::ifrt::PjRtClient::Create(std::move(pjrt_client))); + }), + true); + +class IfrtCallOpTest : public OpsTestBase { + protected: + Status Init(int64_t program_id, int num_inputs, DataType input_type, + const std::vector& variable_arg_indices, + const std::vector& output_type_list) { + TF_CHECK_OK(NodeDefBuilder("op", "IfrtCall") + .Input(FakeInput(num_inputs, input_type)) + .Attr("program_id", program_id) + .Attr("variable_arg_indices", variable_arg_indices) + .Attr("Tout", output_type_list) + .Finalize(node_def())); + return InitOp(); + } +}; + +TEST_F(IfrtCallOpTest, Basic) { + int64_t program_id = 123; + TF_ASSERT_OK(Init( + /*program_id=*/program_id, + /*num_inputs=*/2, + /*input_type=*/DT_INT32, + /*variable_arg_indices=*/{}, + /*output_type_list=*/{DT_INT32})); + + tsl::test_util::MockServingDeviceSelector selector; + IfrtServingExecutableTestHelper helper(&selector); + EXPECT_CALL(selector, ReserveDevice(absl::StrCat(program_id))) + .Times(1) + .WillOnce(Return(tsl::DeviceReservation(0, /*selector=*/nullptr))); + auto executable = + helper.MakeExecutable(program_id, GetMlirModulePath("executable.mlir")); + + TF_ASSERT_OK_AND_ASSIGN( + ServingExecutableRegistry::Handle handle, + ServingExecutableRegistry::Register(program_id, std::move(executable))); + auto handle_cleaner = gtl::MakeCleanup([&handle] { handle.Release(); }); + + AddInputFromArray(TensorShape({1, 3}), {1, 2, 3}); + AddInputFromArray(TensorShape({3, 1}), {1, 2, 3}); + 
TF_ASSERT_OK(RunOpKernel()); + Tensor expected_out = AsTensor({14}, TensorShape({1, 1})); + EXPECT_THAT(*GetOutput(0), TensorEq(expected_out)); +} + +} // namespace +} // namespace tfrt_stub +} // namespace tensorflow diff --git a/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc b/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc index e0acdcce10df8c..899b189046ebfb 100644 --- a/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc +++ b/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc @@ -288,22 +288,28 @@ absl::Status MlrtIfrtLoadVariableKernel::InvokeHelper() { if (used_by_host()) { TF_RETURN_IF_ERROR( ifrt_restore_tensor_registry.SetUsedByHost(runtime_name)); - } - xla::ifrt::Future restored_tensor_future = - ifrt_restore_tensor_registry.GetRestoredTensor(runtime_name); - - restored_tensor_future.OnReady( - [tensor_promise = std::move(tensor_promise)]( - absl::StatusOr restored_tensor) mutable { - if (!restored_tensor.ok()) { - std::move(tensor_promise).SetError(restored_tensor.status()); - return; - } - std::move(tensor_promise) - .Set( - tensorflow::tfrt_stub::FallbackTensor(*restored_tensor)); - }); + xla::ifrt::Future restored_tensor_future = + ifrt_restore_tensor_registry.GetRestoredTensor(runtime_name); + + restored_tensor_future.OnReady( + [tensor_promise = std::move(tensor_promise)]( + absl::StatusOr restored_tensor) mutable { + if (!restored_tensor.ok()) { + std::move(tensor_promise).SetError(restored_tensor.status()); + return; + } + std::move(tensor_promise) + .Set( + tensorflow::tfrt_stub::FallbackTensor(*restored_tensor)); + }); + } else { + // If not used by host, set the future to be ready immediately with an empty + // tensor so that it does not block the graph execution. + std::move(tensor_promise) + .Set( + tensorflow::tfrt_stub::FallbackTensor()); + } // Return the name as the key tensorflow::Tensor key_tensor(tensorflow::DT_STRING, {}); key_tensor.scalar()() = runtime_name; diff --git a/tensorflow/core/tfrt/run_handler_thread_pool/BUILD b/tensorflow/core/tfrt/run_handler_thread_pool/BUILD index a7c5fb24092b17..a70727213d4e0c 100644 --- a/tensorflow/core/tfrt/run_handler_thread_pool/BUILD +++ b/tensorflow/core/tfrt/run_handler_thread_pool/BUILD @@ -57,6 +57,7 @@ cc_library( "//tensorflow/core/protobuf:for_core_protos_cc", "//tensorflow/core/tfrt/runtime:work_queue_interface", "@eigen_archive//:eigen3", + "@local_tsl//tsl/platform:env", "@tf_runtime//:hostcontext", ], ) diff --git a/tensorflow/core/tfrt/run_handler_thread_pool/run_handler.cc b/tensorflow/core/tfrt/run_handler_thread_pool/run_handler.cc index c0147eb6392fb5..aafa0155f14426 100644 --- a/tensorflow/core/tfrt/run_handler_thread_pool/run_handler.cc +++ b/tensorflow/core/tfrt/run_handler_thread_pool/run_handler.cc @@ -32,13 +32,13 @@ limitations under the License. 
#include "tensorflow/core/platform/denormal.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/setround.h" -#include "tensorflow/core/platform/tracing.h" #include "tensorflow/core/profiler/lib/connected_traceme.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/profiler/lib/traceme_encode.h" #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler.h" #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.h" #include "tensorflow/core/tfrt/runtime/work_queue_interface.h" +#include "tsl/platform/tracing.h" #include "tfrt/host_context/async_dispatch.h" // from @tf_runtime namespace tfrt { @@ -72,10 +72,10 @@ RunHandlerEnvironment::EnvThread* RunHandlerEnvironment::CreateThread( RunHandlerEnvironment::Task RunHandlerEnvironment::CreateTask(TaskFunction f) { uint64_t id = 0; - if (tensorflow::tracing::EventCollector::IsEnabled()) { - id = tensorflow::tracing::GetUniqueArg(); - tensorflow::tracing::RecordEvent( - tensorflow::tracing::EventCategory::kScheduleClosure, id); + if (tsl::tracing::EventCollector::IsEnabled()) { + id = tsl::tracing::GetUniqueArg(); + tsl::tracing::RecordEvent(tsl::tracing::EventCategory::kScheduleClosure, + id); } return Task{ std::unique_ptr(new TaskImpl{ @@ -88,8 +88,8 @@ RunHandlerEnvironment::Task RunHandlerEnvironment::CreateTask(TaskFunction f) { void RunHandlerEnvironment::ExecuteTask(const Task& t) { tensorflow::WithContext wc(t.f->context); - tensorflow::tracing::ScopedRegion region( - tensorflow::tracing::EventCategory::kRunClosure, t.f->trace_id); + tsl::tracing::ScopedRegion region(tsl::tracing::EventCategory::kRunClosure, + t.f->trace_id); t.f->f(); } diff --git a/tensorflow/core/tfrt/runtime/BUILD b/tensorflow/core/tfrt/runtime/BUILD index 95712a19629fd8..c4cf2dda41b87e 100644 --- a/tensorflow/core/tfrt/runtime/BUILD +++ b/tensorflow/core/tfrt/runtime/BUILD @@ -153,6 +153,7 @@ tf_cc_shared_test( srcs = ["stream_test.cc"], tags = ["no_oss"], deps = [ + ":step_id", ":stream", "//tensorflow/core/framework:tensor", "//tensorflow/core/framework:tensor_testutil", @@ -162,6 +163,8 @@ tf_cc_shared_test( "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/memory", + "@com_google_absl//absl/status", + "@com_google_absl//absl/strings:string_view", "@com_google_absl//absl/time", "@com_google_googletest//:gtest_main", "@local_tsl//tsl/platform:env", diff --git a/tensorflow/core/tfrt/runtime/runtime.h b/tensorflow/core/tfrt/runtime/runtime.h index 830210d1426dab..1a6925c1535cc9 100644 --- a/tensorflow/core/tfrt/runtime/runtime.h +++ b/tensorflow/core/tfrt/runtime/runtime.h @@ -27,6 +27,8 @@ limitations under the License. 
#include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" +#include "tensorflow/core/common_runtime/device_mgr.h" +#include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/platform/statusor.h" @@ -79,6 +81,11 @@ class ModelRuntimeContext { flib_def_ = flib_def; } + tensorflow::DeviceMgr* device_mgr() const { return device_mgr_; } + void set_device_mgr(tensorflow::DeviceMgr* device_mgr) { + device_mgr_ = device_mgr; + } + bool is_local_session() const { return is_local_session_; } void set_is_local_session(bool is_local_session) { @@ -104,6 +111,7 @@ class ModelRuntimeContext { const GraphDef* graph_def_ = nullptr; const CallableOptions* callable_options_ = nullptr; tfrt::ResourceContext* resource_context_ = nullptr; + tensorflow::DeviceMgr* device_mgr_ = nullptr; FunctionLibraryDefinition* flib_def_ = nullptr; diff --git a/tensorflow/core/tfrt/runtime/stream_test.cc b/tensorflow/core/tfrt/runtime/stream_test.cc index cac9113053bfab..bcb8a14a553675 100644 --- a/tensorflow/core/tfrt/runtime/stream_test.cc +++ b/tensorflow/core/tfrt/runtime/stream_test.cc @@ -15,6 +15,7 @@ limitations under the License. #include "tensorflow/core/tfrt/runtime/stream.h" #include +#include #include #include #include @@ -26,10 +27,13 @@ limitations under the License. #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/memory/memory.h" +#include "absl/status/status.h" +#include "absl/strings/string_view.h" #include "absl/time/clock.h" #include "absl/time/time.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" +#include "tensorflow/core/tfrt/runtime/step_id.h" #include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h" #include "tensorflow/core/tfrt/utils/thread_pool.h" #include "tsl/platform/env.h" @@ -44,27 +48,17 @@ using ::testing::AnyOf; using ::testing::ElementsAreArray; using ::testing::Pair; using ::testing::UnorderedElementsAre; - -class TestStreamInterface : public StreamControllerInterface { - public: - TestStreamInterface() : StreamControllerInterface("test_address") {} -}; - -const bool kUnused = []() { - GetGlobalStreamInterfaceFactory().RegisterController( - []() { return std::make_unique(); }); - return true; -}(); +using ::testing::status::StatusIs; TEST(StreamTest, Simple) { StreamCallbackId callback_id(1234); StepId step_id(5678); std::vector> outputs; - + ScopedStreamCallback scoped_stream_callback; { TF_ASSERT_OK_AND_ASSIGN( - auto scoped_stream_callback, + scoped_stream_callback, GetGlobalStreamCallbackRegistry().Register( "test_model", callback_id, step_id, [&](absl::flat_hash_map arg) { @@ -91,6 +85,16 @@ TEST(StreamTest, Simple) { ElementsAreArray({200})); EXPECT_THAT(GetTfTensorData(outputs[1]["c"]), ElementsAreArray({300})); + + ScopedStreamCallback scoped_stream_callback_copy; + scoped_stream_callback_copy = std::move(scoped_stream_callback); + + auto status = GetGlobalStreamCallbackRegistry().Register( + "test_model", callback_id, step_id, + [&](absl::flat_hash_map arg) { + outputs.push_back(std::move(arg)); + }); + EXPECT_THAT(status, StatusIs(absl::StatusCode::kAlreadyExists)); } TEST(StreamTest, MultipleWriters) { @@ -142,6 +146,63 @@ TEST(StreamTest, MultipleWriters) { } } +class TestStreamControllerInterface : public StreamControllerInterface { + public: + TestStreamControllerInterface() + : StreamControllerInterface("test_controller_address") {} +}; + 
+TEST(StreamControllerInterface, Initialize) { + GetGlobalStreamInterfaceFactory().RegisterController( + []() { return std::make_unique(); }); + TF_ASSERT_OK_AND_ASSIGN( + auto controller_interface, + GetGlobalStreamInterfaceFactory().CreateControllerStreamInterface()); + EXPECT_EQ(controller_interface->controller_address(), + "test_controller_address"); +} + +class TestStreamWorkerInterface : public StreamWorkerInterface { + public: + explicit TestStreamWorkerInterface(std::string worker_address) + : StreamWorkerInterface(worker_address) {} + absl::Status InvokeStreamCallback( + const StreamCallbackId& callback_id, + const std::vector& names, + const std::vector>>& + responses) override { + return absl::OkStatus(); + } +}; + +TEST(StreamWorkerInterface, Initialize) { + GetGlobalStreamInterfaceFactory().RegisterWorker( + [](absl::string_view address) + -> absl::StatusOr> { + return std::make_unique( + "test_worker_address"); + }); + TF_ASSERT_OK_AND_ASSIGN( + auto worker_interface, + GetGlobalStreamInterfaceFactory().CreateWorkerStreamInterface()( + "test_worker_address")); + EXPECT_EQ(worker_interface->controller_address(), "test_worker_address"); +} + +TEST(StepId, Generate) { + StepId step_id(1234); + EXPECT_EQ(step_id.id, 1234); + StepIdGenerator step_id_generator; + EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(1)); + EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(2)); + EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(3)); +} + +TEST(StepId, GlobalInitial) { + EXPECT_EQ(GetGlobalInitialStepId(), 0); + TEST_ScopedInitialStepId test_id(127); + EXPECT_EQ(GetGlobalInitialStepId(), 127); +} } // namespace } // namespace tfrt_stub } // namespace tensorflow diff --git a/tensorflow/core/tfrt/saved_model/BUILD b/tensorflow/core/tfrt/saved_model/BUILD index 3e49778bed8416..9e85c14baef362 100644 --- a/tensorflow/core/tfrt/saved_model/BUILD +++ b/tensorflow/core/tfrt/saved_model/BUILD @@ -118,6 +118,7 @@ cc_library( "//tensorflow/core:core_cpu_base", "//tensorflow/core:framework", "//tensorflow/core:lib", + "//tensorflow/core/common_runtime:device_mgr", "//tensorflow/core/framework:function_proto_cc", "//tensorflow/core/framework:graph_proto_cc", "//tensorflow/core/framework:tensor_proto_cc", diff --git a/tensorflow/core/tfrt/saved_model/saved_model.cc b/tensorflow/core/tfrt/saved_model/saved_model.cc index 85bc3fabc94fad..13ff33bb9e086c 100644 --- a/tensorflow/core/tfrt/saved_model/saved_model.cc +++ b/tensorflow/core/tfrt/saved_model/saved_model.cc @@ -577,6 +577,8 @@ absl::StatusOr> SavedModelImpl::LoadSavedModel( CombineSignatureDefs(meta_graph_def.signature_def()); model_context.set_graph_def(&meta_graph_def.graph_def()); model_context.set_callable_options(&callable_options); + model_context.set_device_mgr(&fallback_state->device_manager()); + TF_RETURN_IF_ERROR( options.graph_execution_options.runtime->CreateRuntimeResources( model_context)); diff --git a/tensorflow/core/tfrt/tfrt_session/tfrt_session.cc b/tensorflow/core/tfrt/tfrt_session/tfrt_session.cc index 8116c4aab1fce3..67475b0b23ed3e 100644 --- a/tensorflow/core/tfrt/tfrt_session/tfrt_session.cc +++ b/tensorflow/core/tfrt/tfrt_session/tfrt_session.cc @@ -35,6 +35,7 @@ limitations under the License. 
#include "Eigen/ThreadPool" // from @eigen_archive #include "llvm/ADT/STLExtras.h" #include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h" +#include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/local_session_selection.h" #include "tensorflow/core/common_runtime/process_util.h" #include "tensorflow/core/common_runtime/session_factory.h" @@ -218,6 +219,7 @@ class TfrtSession : public tensorflow::Session { &options, /*export_dir=*/"unknown_export_dir", resource_context.get()); // TODO(b/334641254): Offer a Session option that prunes the graph_def. model_context.set_graph_def(&graph); + model_context.set_device_mgr(&fallback_state->device_manager()); // In the multi-host case, this prevents local Sessions from running // global resource creation functions. model_context.set_is_local_session( diff --git a/tensorflow/core/tfrt/utils/BUILD b/tensorflow/core/tfrt/utils/BUILD index 7517e07928f87b..7c5cf7a46579a5 100644 --- a/tensorflow/core/tfrt/utils/BUILD +++ b/tensorflow/core/tfrt/utils/BUILD @@ -155,9 +155,10 @@ tf_cc_test( deps = [ ":error_util", "//tensorflow/core/platform:status", + "@com_google_absl//absl/status", "@com_google_googletest//:gtest_main", + "@local_xla//xla/tsl/concurrency:async_value", "@tf_runtime//:support", - "@tf_runtime//cpp_tests:common", ], ) diff --git a/tensorflow/core/tfrt/utils/error_util.cc b/tensorflow/core/tfrt/utils/error_util.cc index 1f8fb6a6770570..2530b98f051041 100644 --- a/tensorflow/core/tfrt/utils/error_util.cc +++ b/tensorflow/core/tfrt/utils/error_util.cc @@ -47,10 +47,4 @@ tensorflow::Status ToTfStatus(const tfrt::AsyncValue* av) { return absl::OkStatus(); } -absl::Status AbslStatusFromTfStatus(tensorflow::Status status) { - if (status.ok()) return absl::OkStatus(); - return absl::Status(static_cast(status.code()), - status.message()); -} - } // namespace tfrt diff --git a/tensorflow/core/tfrt/utils/error_util.h b/tensorflow/core/tfrt/utils/error_util.h index e694931f82e2e5..ee7bcd81dd913f 100644 --- a/tensorflow/core/tfrt/utils/error_util.h +++ b/tensorflow/core/tfrt/utils/error_util.h @@ -76,8 +76,6 @@ inline llvm::Error MakeStatusError(tensorflow::Status status) { return MakeStringError(MakeStatusString(status)); } -absl::Status AbslStatusFromTfStatus(tensorflow::Status status); - } // namespace tfrt #endif // TENSORFLOW_CORE_TFRT_UTILS_ERROR_UTIL_H_ diff --git a/tensorflow/core/tfrt/utils/error_util_test.cc b/tensorflow/core/tfrt/utils/error_util_test.cc index 07c65905825a53..06edb63c897af4 100644 --- a/tensorflow/core/tfrt/utils/error_util_test.cc +++ b/tensorflow/core/tfrt/utils/error_util_test.cc @@ -16,6 +16,8 @@ limitations under the License. 
#include #include +#include "absl/status/status.h" +#include "xla/tsl/concurrency/async_value_ref.h" #include "tensorflow/core/platform/status.h" #include "tfrt/support/error_util.h" // from @tf_runtime @@ -38,5 +40,13 @@ TEST(ErrorUtilTest, UnsupportedErrorConversion) { tfrt::ErrorCode::kUnknown); } +TEST(ErrorUtilTest, ToTfStatusError) { + auto error_av = + tsl::MakeErrorAsyncValueRef(absl::UnauthenticatedError("test_error")); + auto status = ToTfStatus(error_av.get()); + EXPECT_EQ(status.code(), absl::StatusCode::kUnauthenticated); + EXPECT_EQ(status.message(), "test_error"); +} + } // namespace } // namespace tfrt diff --git a/tensorflow/core/tpu/kernels/BUILD b/tensorflow/core/tpu/kernels/BUILD index 50334814c993b3..7d55de7bbc1de3 100644 --- a/tensorflow/core/tpu/kernels/BUILD +++ b/tensorflow/core/tpu/kernels/BUILD @@ -1481,6 +1481,21 @@ cc_library( visibility = ["//visibility:public"], ) +cc_library( + name = "_pywrap_sparse_core_layout_header_only", + srcs = [], + hdrs = ["sparse_core_layout.h"], + visibility = ["//tensorflow/python/tpu:__pkg__"], # ONLY for `_pywrap_sparse_core_layout`. + deps = [ + ":sparse_core_layout_proto_cc", + "//tensorflow/core/platform:stringpiece", + "@com_google_absl//absl/log", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/status", + "@com_google_absl//absl/status:statusor", + ], +) + cc_library( name = "sparse_core_layout", srcs = ["sparse_core_layout.cc"], diff --git a/tensorflow/core/tpu/kernels/tpu_execute_op.cc b/tensorflow/core/tpu/kernels/tpu_execute_op.cc index 02654c0d6a2cc6..7a9812be9ae43b 100644 --- a/tensorflow/core/tpu/kernels/tpu_execute_op.cc +++ b/tensorflow/core/tpu/kernels/tpu_execute_op.cc @@ -42,7 +42,6 @@ limitations under the License. #include "xla/shape.h" #include "xla/shape_tree.h" #include "xla/shape_util.h" -#include "xla/status.h" #include "xla/status_macros.h" #include "xla/statusor.h" #include "xla/stream_executor/device_memory.h" @@ -739,9 +738,8 @@ Status TPUExecuteOp::DoWork(OpKernelContext* context) { }); } - auto definition_event = std::make_shared(stream->parent()); - TF_RET_CHECK(definition_event->Init()) - << "TPU definition event initialization failed"; + TF_ASSIGN_OR_RETURN(std::shared_ptr definition_event, + stream->parent()->CreateEvent()); trace_me_init.Stop(); diff --git a/tensorflow/core/tpu/kernels/tpu_reshard_variables_op.cc b/tensorflow/core/tpu/kernels/tpu_reshard_variables_op.cc index 96ae248347474f..e311a802a158f3 100644 --- a/tensorflow/core/tpu/kernels/tpu_reshard_variables_op.cc +++ b/tensorflow/core/tpu/kernels/tpu_reshard_variables_op.cc @@ -216,9 +216,8 @@ Status TPUReshardVariablesOpKernel::DoTpuExecute( TF_RET_CHECK(!executable->has_session_module()) << "session module not supported in sharding/unsharding program."; - auto definition_event = std::make_shared(stream->parent()); - TF_RET_CHECK(definition_event->Init()) - << "TPU definition event initialization failed"; + TF_ASSIGN_OR_RETURN(std::shared_ptr definition_event, + stream->parent()->CreateEvent()); trace_me_init.Stop(); diff --git a/tensorflow/core/tpu/tpu_execute.cc b/tensorflow/core/tpu/tpu_execute.cc index 66b756213c46a6..8daf680e09314f 100644 --- a/tensorflow/core/tpu/tpu_execute.cc +++ b/tensorflow/core/tpu/tpu_execute.cc @@ -46,7 +46,6 @@ limitations under the License. 
#include "xla/shape_layout.h" #include "xla/shape_tree.h" #include "xla/shape_util.h" -#include "xla/status.h" #include "xla/status_macros.h" #include "xla/statusor.h" #include "xla/stream_executor/device_memory.h" diff --git a/tensorflow/dtensor/cc/dtensor_device.cc b/tensorflow/dtensor/cc/dtensor_device.cc index 6632f22be4f6d7..ca1460e6260990 100644 --- a/tensorflow/dtensor/cc/dtensor_device.cc +++ b/tensorflow/dtensor/cc/dtensor_device.cc @@ -2826,7 +2826,7 @@ void ExperimentalSetDefaultLayout(const std::string& serialized_layout, StatusOr layout = Layout::FromString(serialized_layout); if (!layout.ok()) { RETURN_STATUS(status, TF_INTERNAL, - tsl::NullTerminatedMessage(layout.status())); + absl::StatusMessageAsCStr(layout.status())); } DTensorDevice* device = reinterpret_cast(device_info); device->SetDefaultLayout(layout.value()); @@ -2842,7 +2842,7 @@ void ExperimentalSetDefaultMesh(const std::string& serialized_mesh, StatusOr mesh = Mesh::FromString(serialized_mesh); if (!mesh.ok()) { RETURN_STATUS(status, TF_INTERNAL, - tsl::NullTerminatedMessage(mesh.status())); + absl::StatusMessageAsCStr(mesh.status())); } DTensorDevice* device = reinterpret_cast(device_info); device->SetDefaultMesh(mesh.value()); diff --git a/tensorflow/dtensor/mlir/BUILD b/tensorflow/dtensor/mlir/BUILD index e92270975a2f94..5cd39d3619908d 100644 --- a/tensorflow/dtensor/mlir/BUILD +++ b/tensorflow/dtensor/mlir/BUILD @@ -273,6 +273,7 @@ cc_library( "@com_google_absl//absl/types:span", "@llvm-project//llvm:Support", "@llvm-project//mlir:FuncDialect", + "@llvm-project//mlir:FunctionInterfaces", "@llvm-project//mlir:IR", "@llvm-project//mlir:Pass", "@llvm-project//mlir:SideEffectInterfaces", diff --git a/tensorflow/dtensor/mlir/expansions/reduce_spmd_expander.cc b/tensorflow/dtensor/mlir/expansions/reduce_spmd_expander.cc index 1ee4a60cd4d3f6..4905d49c8faf1a 100644 --- a/tensorflow/dtensor/mlir/expansions/reduce_spmd_expander.cc +++ b/tensorflow/dtensor/mlir/expansions/reduce_spmd_expander.cc @@ -132,11 +132,11 @@ Status ExtractDims( auto data_format = bias_add_grad_op.getDataFormat(); // rank is at least 2 (required by BiasAddGrad). int rank = ValueRank(bias_add_grad_op->getOperand(0)); - if (data_format.equals("NHWC")) { + if (data_format == "NHWC") { for (int dim = 0; dim < rank - 1; ++dim) { reduced_dims->push_back(dim); } - } else if (data_format.equals("NCHW")) { + } else if (data_format == "NCHW") { for (int dim = 0; dim < rank; ++dim) { if (dim == 1) continue; reduced_dims->push_back(dim); diff --git a/tensorflow/dtensor/mlir/function_renaming.cc b/tensorflow/dtensor/mlir/function_renaming.cc index 0c485e91878d15..3453986221730c 100644 --- a/tensorflow/dtensor/mlir/function_renaming.cc +++ b/tensorflow/dtensor/mlir/function_renaming.cc @@ -17,13 +17,14 @@ limitations under the License. 
#include #include "absl/strings/str_cat.h" +#include "llvm/ADT/STLExtras.h" #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project #include "mlir/IR/Attributes.h" // from @llvm-project +#include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "mlir/IR/Operation.h" // from @llvm-project #include "mlir/IR/SymbolTable.h" // from @llvm-project #include "mlir/Pass/Pass.h" // from @llvm-project #include "tensorflow/dtensor/cc/constants.h" -#include "tensorflow/dtensor/mlir/dtensor_mlir_passes.h" namespace tensorflow { namespace dtensor { diff --git a/tensorflow/dtensor/mlir/handle_cross_cluster_dependencies.cc b/tensorflow/dtensor/mlir/handle_cross_cluster_dependencies.cc index 6877f77dd25f37..5244e18e13c724 100644 --- a/tensorflow/dtensor/mlir/handle_cross_cluster_dependencies.cc +++ b/tensorflow/dtensor/mlir/handle_cross_cluster_dependencies.cc @@ -22,17 +22,23 @@ limitations under the License. #include "mlir/IR/Builders.h" // from @llvm-project #include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "mlir/IR/BuiltinTypes.h" // from @llvm-project +#include "mlir/IR/DialectRegistry.h" // from @llvm-project +#include "mlir/IR/MLIRContext.h" // from @llvm-project #include "mlir/IR/Operation.h" // from @llvm-project #include "mlir/IR/UseDefLists.h" // from @llvm-project #include "mlir/IR/Value.h" // from @llvm-project -#include "mlir/Support/DebugStringHelper.h" // from @llvm-project +#include "mlir/IR/Visitors.h" // from @llvm-project +#include "mlir/Pass/Pass.h" // from @llvm-project #include "mlir/Support/LLVM.h" // from @llvm-project #include "mlir/Support/LogicalResult.h" // from @llvm-project -#include "mlir/Transforms/Passes.h" // from @llvm-project #include "mlir/Transforms/RegionUtils.h" // from @llvm-project #include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h" +#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" +#include "tensorflow/dtensor/cc/dstatus.h" +#include "tensorflow/dtensor/cc/tensor_layout.h" #include "tensorflow/dtensor/mlir/dtensor_dialect/ir/dialect.h" +#include "tensorflow/dtensor/mlir/dtensor_dialect/ir/dtensor_attributes.h" #include "tensorflow/dtensor/mlir/ir/tf_dtensor.h" #include "tensorflow/dtensor/mlir/layout_parsing.h" #include "tensorflow/dtensor/mlir/spmd_expander_common.h" diff --git a/tensorflow/dtensor/mlir/handle_sparsetensors.cc b/tensorflow/dtensor/mlir/handle_sparsetensors.cc index 678d8a41841114..1464c47beaf4a4 100644 --- a/tensorflow/dtensor/mlir/handle_sparsetensors.cc +++ b/tensorflow/dtensor/mlir/handle_sparsetensors.cc @@ -20,6 +20,8 @@ limitations under the License. #include #include "absl/container/flat_hash_set.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_join.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project @@ -27,23 +29,22 @@ limitations under the License. 
#include "mlir/IR/Builders.h" // from @llvm-project #include "mlir/IR/BuiltinAttributes.h" // from @llvm-project #include "mlir/IR/BuiltinOps.h" // from @llvm-project +#include "mlir/IR/BuiltinTypeInterfaces.h" // from @llvm-project #include "mlir/IR/BuiltinTypes.h" // from @llvm-project #include "mlir/IR/Operation.h" // from @llvm-project -#include "mlir/IR/SymbolTable.h" // from @llvm-project +#include "mlir/IR/OperationSupport.h" // from @llvm-project +#include "mlir/IR/Value.h" // from @llvm-project +#include "mlir/IR/ValueRange.h" // from @llvm-project #include "mlir/IR/Visitors.h" // from @llvm-project +#include "mlir/Interfaces/FunctionInterfaces.h" // from @llvm-project #include "mlir/Pass/Pass.h" // from @llvm-project #include "mlir/Pass/PassManager.h" // from @llvm-project #include "mlir/Support/LLVM.h" // from @llvm-project #include "mlir/Support/LogicalResult.h" // from @llvm-project -#include "mlir/Transforms/Passes.h" // from @llvm-project #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.h" -#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h" #include "tensorflow/dtensor/cc/constants.h" -#include "tensorflow/dtensor/mlir/device_utils.h" -#include "tensorflow/dtensor/mlir/dtensor_mlir_passes.h" -#include "tensorflow/dtensor/mlir/op_utils.h" -#include "tensorflow/dtensor/mlir/spmd_expander_common.h" +#include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/mlir/value_utils.h" namespace tensorflow { diff --git a/tensorflow/lite/acceleration/configuration/configuration.proto b/tensorflow/lite/acceleration/configuration/configuration.proto index a2bbd88004f8f6..cf82771c225328 100644 --- a/tensorflow/lite/acceleration/configuration/configuration.proto +++ b/tensorflow/lite/acceleration/configuration/configuration.proto @@ -317,6 +317,10 @@ enum XNNPackFlags { message XNNPackSettings { optional int32 num_threads = 1; optional XNNPackFlags flags = 2 [default = TFLITE_XNNPACK_DELEGATE_NO_FLAGS]; + // Path to the experimental XNNPack cache file. XNNPack packed buffers are + // saved to and reloaded from this cache which can reduce initialization time + // and the packing memory footprint. + optional string experimental_weight_cache_file_path = 3; } // CoreML Delegate settings. 
diff --git a/tensorflow/lite/acceleration/configuration/configuration_generated.h b/tensorflow/lite/acceleration/configuration/configuration_generated.h index 6d65817edf2f99..5fd634acad5837 100644 --- a/tensorflow/lite/acceleration/configuration/configuration_generated.h +++ b/tensorflow/lite/acceleration/configuration/configuration_generated.h @@ -1692,6 +1692,7 @@ struct XNNPackSettingsT : public ::flatbuffers::NativeTable { typedef XNNPackSettings TableType; int32_t num_threads = 0; tflite::XNNPackFlags flags = tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS; + std::string experimental_weight_cache_file_path{}; }; struct XNNPackSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { @@ -1699,7 +1700,8 @@ struct XNNPackSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { typedef XNNPackSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_NUM_THREADS = 4, - VT_FLAGS = 6 + VT_FLAGS = 6, + VT_EXPERIMENTAL_WEIGHT_CACHE_FILE_PATH = 8 }; int32_t num_threads() const { return GetField(VT_NUM_THREADS, 0); @@ -1707,10 +1709,15 @@ struct XNNPackSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { tflite::XNNPackFlags flags() const { return static_cast(GetField(VT_FLAGS, 0)); } + const ::flatbuffers::String *experimental_weight_cache_file_path() const { + return GetPointer(VT_EXPERIMENTAL_WEIGHT_CACHE_FILE_PATH); + } bool Verify(::flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUM_THREADS, 4) && VerifyField(verifier, VT_FLAGS, 4) && + VerifyOffset(verifier, VT_EXPERIMENTAL_WEIGHT_CACHE_FILE_PATH) && + verifier.VerifyString(experimental_weight_cache_file_path()) && verifier.EndTable(); } XNNPackSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; @@ -1728,6 +1735,9 @@ struct XNNPackSettingsBuilder { void add_flags(tflite::XNNPackFlags flags) { fbb_.AddElement(XNNPackSettings::VT_FLAGS, static_cast(flags), 0); } + void add_experimental_weight_cache_file_path(::flatbuffers::Offset<::flatbuffers::String> experimental_weight_cache_file_path) { + fbb_.AddOffset(XNNPackSettings::VT_EXPERIMENTAL_WEIGHT_CACHE_FILE_PATH, experimental_weight_cache_file_path); + } explicit XNNPackSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); @@ -1742,13 +1752,28 @@ struct XNNPackSettingsBuilder { inline ::flatbuffers::Offset CreateXNNPackSettings( ::flatbuffers::FlatBufferBuilder &_fbb, int32_t num_threads = 0, - tflite::XNNPackFlags flags = tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS) { + tflite::XNNPackFlags flags = tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS, + ::flatbuffers::Offset<::flatbuffers::String> experimental_weight_cache_file_path = 0) { XNNPackSettingsBuilder builder_(_fbb); + builder_.add_experimental_weight_cache_file_path(experimental_weight_cache_file_path); builder_.add_flags(flags); builder_.add_num_threads(num_threads); return builder_.Finish(); } +inline ::flatbuffers::Offset CreateXNNPackSettingsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_threads = 0, + tflite::XNNPackFlags flags = tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS, + const char *experimental_weight_cache_file_path = nullptr) { + auto experimental_weight_cache_file_path__ = experimental_weight_cache_file_path ? 
_fbb.CreateString(experimental_weight_cache_file_path) : 0; + return tflite::CreateXNNPackSettings( + _fbb, + num_threads, + flags, + experimental_weight_cache_file_path__); +} + ::flatbuffers::Offset CreateXNNPackSettings(::flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); struct CoreMLSettingsT : public ::flatbuffers::NativeTable { @@ -4911,7 +4936,8 @@ inline ::flatbuffers::Offset CreateHexagonSettings(::flatbuffer inline bool operator==(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs) { return (lhs.num_threads == rhs.num_threads) && - (lhs.flags == rhs.flags); + (lhs.flags == rhs.flags) && + (lhs.experimental_weight_cache_file_path == rhs.experimental_weight_cache_file_path); } inline bool operator!=(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs) { @@ -4930,6 +4956,7 @@ inline void XNNPackSettings::UnPackTo(XNNPackSettingsT *_o, const ::flatbuffers: (void)_resolver; { auto _e = num_threads(); _o->num_threads = _e; } { auto _e = flags(); _o->flags = _e; } + { auto _e = experimental_weight_cache_file_path(); if (_e) _o->experimental_weight_cache_file_path = _e->str(); } } inline ::flatbuffers::Offset XNNPackSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { @@ -4942,10 +4969,12 @@ inline ::flatbuffers::Offset CreateXNNPackSettings(::flatbuffer struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const XNNPackSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _num_threads = _o->num_threads; auto _flags = _o->flags; + auto _experimental_weight_cache_file_path = _o->experimental_weight_cache_file_path.empty() ? 0 : _fbb.CreateString(_o->experimental_weight_cache_file_path); return tflite::CreateXNNPackSettings( _fbb, _num_threads, - _flags); + _flags, + _experimental_weight_cache_file_path); } diff --git a/tensorflow/lite/acceleration/configuration/testdata/configuration.proto_prev b/tensorflow/lite/acceleration/configuration/testdata/configuration.proto_prev index 655d95f5990120..e9616ecb4afb65 100644 --- a/tensorflow/lite/acceleration/configuration/testdata/configuration.proto_prev +++ b/tensorflow/lite/acceleration/configuration/testdata/configuration.proto_prev @@ -97,6 +97,8 @@ enum Delegate { CORE_ML = 7; // Arm NN Delegate. ARMNN = 8; + // MediaTek Neuron Delegate. + MTK_NEURON = 9; } enum NNAPIExecutionPreference { @@ -662,6 +664,111 @@ message ArmNNSettings { optional string additional_parameters = 3; } +// MediaTek Neuron Delegate Settings. +// See https://neuropilot.mediatek.com/ for more information. +message MtkNeuronSettings { + enum ExecutionPreference { + PREFERENCE_UNDEFINED = 0; + + // Prefer execution in a power-efficient mode, optimizing for low power + // consumption. + PREFERENCE_LOW_POWER = 1; + + // Prefer execution that provides shorter single-shot latency, optimizing + // for fast response times. + PREFERENCE_FAST_SINGLE_ANSWER = 2; + + // Prefer execution that provides sustained speed for continuous operation + // and higher throughput, optimizing for overall performance in ongoing or + // repetitive tasks. + PREFERENCE_SUSTAINED_SPEED = 3; + + // Prefer execution in the turbo boost mode, which may boost the frequencies + // of APU and other system components such as CPU and DRAM, to achieve + // maximum performance. 
If boosting is not supported in the underlying + // system, it falls back to the behavior of PREFERENCE_FAST_SINGLE_ANSWER. + PREFERENCE_TURBO_BOOST = 4; + } + + enum ExecutionPriority { + PRIORITY_UNDEFINED = 0; + PRIORITY_LOW = 90; + PRIORITY_MEDIUM = 100; + PRIORITY_HIGH = 110; + } + + enum OptimizationHint { + OPTIMIZATION_NONE = 0; + + // Optimization hint for reducing latency. This hint may distribute the + // workload across multiple APU cores in the compiled model to achieve + // faster execution. + OPTIMIZATION_LOW_LATENCY = 1; + + // Optimization hint for reducing DRAM access and minimizing memory + // bandwidth usage through kernel fusion and data fusion techniques. + OPTIMIZATION_DEEP_FUSION = 2; + + // Optimization hint for processing multiple input samples in parallel + // across available APU cores in the batch dimension. This optimization is + // effective for models with a batch size greater than 1. + OPTIMIZATION_BATCH_PROCESSING = 3; + } + + // How to check the operator compatibility with the underlying accelerator. + enum OperationCheckMode { + NO_OPERATION_CHECK = 0; + + // Checks each node separately with multiple queries to the backend. + PER_NODE_OPERATION_CHECK = 1; + + // Checks all nodes in the graph at once with a batched query to the + // backend. + PRE_OPERATION_CHECK = 2; + } + + // The preferred execution mode. The system-wide default will be used when + // PREFERENCE_UNDEFINED is passed to the delegate. + optional ExecutionPreference execution_preference = 1; + + // The execution priority of the inference request. The system-wide default + // will be used when PRIORITY_UNDEFINED is passed to the delegate. + optional ExecutionPriority execution_priority = 2; + + // The optimization hints that will instruct the model compiler. + repeated OptimizationHint optimization_hints = 3 [packed = true]; + + // Whether and how to check the operator compatibility with the underlying + // accelerator. + optional OperationCheckMode operation_check_mode = 4; + + // Whether to allow the accelerator to optionally use lower-precision FP16 + // arithmetic when performing calculations on FP32 data. + optional bool allow_fp16_precision_for_fp32 = 5; + + // Whether to use AHardwareBuffer_* API to manage buffers. Requires Android + // API level >= 26, or a dedicated AHardwareBuffer API shim on non-Android + // platforms. + optional bool use_ahwb = 6; + + // Whether to use cachable (consistent / coherent) memory. This will affect + // both buffer allocation and buffer importing behaviors. + optional bool use_cacheable_buffer = 7 [default = true]; + + // Extra options for the Neuron compiler, such as "--opt-bw". + // See docs at https://neuropilot.mediatek.com/ for available options. + repeated string compile_options = 8; + + // Optional list of target accelerator device names. + // If empty, the delegate will automatically select the accelerator. + // See docs at https://neuropilot.mediatek.com/ for available accelerators. + repeated string accelerator_names = 9; + + // Optional path to the platform-dependent Neuron configuration file. + // See docs at https://neuropilot.mediatek.com/ for more details. + optional string neuron_config_path = 10; +} + // How to configure TFLite. message TFLiteSettings { // Which delegate to use. @@ -719,6 +826,9 @@ message TFLiteSettings { // For configuring the Arm NN delegate. optional ArmNNSettings armnn_settings = 16; + + // For configuring MediaTek Neuron delegate. 
+ optional MtkNeuronSettings mtk_neuron_settings = 17; } // Whether to automatically fallback to TFLite CPU path on delegation errors. diff --git a/tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.cc b/tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.cc index c82b0a7964af04..c2e9ec540fcbb0 100644 --- a/tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.cc +++ b/tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.cc @@ -39,6 +39,10 @@ static TfLiteDelegate* CreateDelegate(const void* settings) { if (xnnpack_settings->flags()) { options.flags = xnnpack_settings->flags(); } + if (xnnpack_settings->experimental_weight_cache_file_path()) { + options.experimental_weight_cache_file_path = + xnnpack_settings->experimental_weight_cache_file_path()->c_str(); + } } return TfLiteXNNPackDelegateCreate(&options); } diff --git a/tensorflow/lite/core/c/common.h b/tensorflow/lite/core/c/common.h index ea54be9490ef01..96f19f12336bc4 100644 --- a/tensorflow/lite/core/c/common.h +++ b/tensorflow/lite/core/c/common.h @@ -472,6 +472,8 @@ typedef enum TfLiteCustomAllocationFlags { kTfLiteCustomAllocationFlagsSkipAlignCheck = 1, } TfLiteCustomAllocationFlags; +enum { kTfLiteNoBufferIdentifier = SIZE_MAX }; + /// A tensor in the interpreter system which is a wrapper around a buffer of /// data including a dimensionality (or NULL if not currently defined). #ifndef TF_LITE_STATIC_MEMORY diff --git a/tensorflow/lite/core/interpreter_builder.cc b/tensorflow/lite/core/interpreter_builder.cc index 41e62cfd675340..d8c6d181ebdd1a 100644 --- a/tensorflow/lite/core/interpreter_builder.cc +++ b/tensorflow/lite/core/interpreter_builder.cc @@ -691,7 +691,8 @@ TfLiteStatus InterpreterBuilder::ParseTensors( if (subgraph->SetTensorParametersReadOnly( i, type, get_name(tensor), dims, quantization, buffer_ptr, - buffer_size, allocation_, sparsity) != kTfLiteOk) { + buffer_size, allocation_, sparsity, + /*buffer_identifier=*/tensor->buffer()) != kTfLiteOk) { TF_LITE_REPORT_ERROR(error_reporter_, "Tensor %d is invalidly specified in schema.\n", i); diff --git a/tensorflow/lite/core/kernels/BUILD b/tensorflow/lite/core/kernels/BUILD index fcb5a458edcbce..71f9980fe1b957 100644 --- a/tensorflow/lite/core/kernels/BUILD +++ b/tensorflow/lite/core/kernels/BUILD @@ -25,7 +25,6 @@ cc_test( ":builtin_ops", "//tensorflow/lite:mutable_op_resolver", "//tensorflow/lite/delegates/xnnpack:xnnpack_delegate", - "//tensorflow/lite/delegates/xnnpack:xnnpack_delegate_test", "//tensorflow/lite/schema:schema_fbs", "@com_google_googletest//:gtest_main", ], diff --git a/tensorflow/lite/core/kernels/register_test.cc b/tensorflow/lite/core/kernels/register_test.cc index f47a7f84397c35..6bbc1bd0db07ae 100644 --- a/tensorflow/lite/core/kernels/register_test.cc +++ b/tensorflow/lite/core/kernels/register_test.cc @@ -20,7 +20,6 @@ limitations under the License. 
#include #include #include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h" -#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate_test.h" #include "tensorflow/lite/mutable_op_resolver.h" #include "tensorflow/lite/schema/schema_generated.h" @@ -57,13 +56,13 @@ TEST(BuiltinOpResolverTest, HasXNNPACKDelegate_QS8) { builtin_op_resolver.GetDelegateCreators()[0]; std::unique_ptr delegate = delegate_creator(nullptr); - const void *delegate_data = TfLiteOpaqueDelegateGetData(delegate.get()); - TfLiteXNNPackDelegateOptions options = GetOptions(delegate_data); + const TfLiteXNNPackDelegateOptions *options = + TfLiteXNNPackDelegateGetOptions(delegate.get()); - ASSERT_EQ(options.flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8, + ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8, TFLITE_XNNPACK_DELEGATE_FLAG_QU8); - ASSERT_EQ(options.flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8, + ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8, TFLITE_XNNPACK_DELEGATE_FLAG_QS8); } @@ -74,13 +73,13 @@ TEST(BuiltinOpResolverTest, HasXNNPACKDelegate_QS8_QU8) { builtin_op_resolver.GetDelegateCreators()[0]; std::unique_ptr delegate = delegate_creator(nullptr); - const void *delegate_data = TfLiteOpaqueDelegateGetData(delegate.get()); - TfLiteXNNPackDelegateOptions options = GetOptions(delegate_data); + const TfLiteXNNPackDelegateOptions *options = + TfLiteXNNPackDelegateGetOptions(delegate.get()); - ASSERT_EQ(options.flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8, + ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8, TFLITE_XNNPACK_DELEGATE_FLAG_QU8); - ASSERT_EQ(options.flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8, + ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8, TFLITE_XNNPACK_DELEGATE_FLAG_QS8); } @@ -91,12 +90,12 @@ TEST(BuiltinOpResolverTest, Disable_QU8) { builtin_op_resolver.GetDelegateCreators()[0]; std::unique_ptr delegate = delegate_creator(nullptr); - const void *delegate_data = TfLiteOpaqueDelegateGetData(delegate.get()); - TfLiteXNNPackDelegateOptions options = GetOptions(delegate_data); + const TfLiteXNNPackDelegateOptions *options = + TfLiteXNNPackDelegateGetOptions(delegate.get()); - ASSERT_EQ(options.flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8, 0); + ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8, 0); - ASSERT_EQ(options.flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8, + ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8, TFLITE_XNNPACK_DELEGATE_FLAG_QS8); } #endif // TFLITE_WITHOUT_XNNPACK diff --git a/tensorflow/lite/core/macros.h b/tensorflow/lite/core/macros.h index f1d10aec93229e..9eab6be877d2e5 100644 --- a/tensorflow/lite/core/macros.h +++ b/tensorflow/lite/core/macros.h @@ -74,7 +74,7 @@ inline constexpr char tflite_metadata_buffer_location[] = "buffer_location"; inline constexpr char tflite_metadata_min_runtime_version[] = "min_runtime_version"; // the stablehlo op version is supported by the tflite runtime -inline constexpr char tflite_supported_stablehlo_version[] = "0.19.0"; +inline constexpr char tflite_supported_stablehlo_version[] = "1.0.0"; #endif #endif // TENSORFLOW_LITE_CORE_MACROS_H_ diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc index 26ba2037342405..ce3622105b1ce5 100644 --- a/tensorflow/lite/core/subgraph.cc +++ b/tensorflow/lite/core/subgraph.cc @@ -1856,7 +1856,8 @@ TfLiteStatus Subgraph::GetNodeAndRegistration( TfLiteStatus Subgraph::SetTensorParametersReadOnly( int tensor_index, TfLiteType type, const char* name, const size_t ndims, const int* dims, TfLiteQuantization quantization, const char* 
buffer, - size_t bytes, const Allocation* allocation, TfLiteSparsity* sparsity) { + size_t bytes, const Allocation* allocation, TfLiteSparsity* sparsity, + const size_t buffer_identifier) { // Ensure quantization cleanup on failure. ScopedTfLiteQuantization scoped_quantization(&quantization); ScopedTfLiteSparsity scoped_sparsity(sparsity); @@ -1904,6 +1905,9 @@ TfLiteStatus Subgraph::SetTensorParametersReadOnly( tensor.quantization = *scoped_quantization.release(); tensor.sparsity = scoped_sparsity.release(); } + if (buffer_identifier != kTfLiteNoBufferIdentifier) { + tensor_buffer_identifiers_[tensor_index] = buffer_identifier; + } return kTfLiteOk; } diff --git a/tensorflow/lite/core/subgraph.h b/tensorflow/lite/core/subgraph.h index 5940bfbb232ca3..281ac04adc2096 100644 --- a/tensorflow/lite/core/subgraph.h +++ b/tensorflow/lite/core/subgraph.h @@ -23,6 +23,7 @@ limitations under the License. #include #include #include +#include #include #include #include @@ -132,16 +133,18 @@ class Subgraph { int tensor_index, TfLiteType type, const char* name, const std::vector& dims, TfLiteQuantization quantization, const char* buffer, size_t bytes, const Allocation* allocation = nullptr, - TfLiteSparsity* sparsity = nullptr) { + TfLiteSparsity* sparsity = nullptr, + size_t buffer_identifier = kTfLiteNoBufferIdentifier) { return SetTensorParametersReadOnly(tensor_index, type, name, dims.size(), dims.data(), quantization, buffer, bytes, - allocation, sparsity); + allocation, sparsity, buffer_identifier); } TfLiteStatus SetTensorParametersReadOnly( int tensor_index, TfLiteType type, const char* name, const size_t ndims, const int* dims, TfLiteQuantization quantization, const char* buffer, size_t bytes, const Allocation* allocation = nullptr, - TfLiteSparsity* sparsity = nullptr); + TfLiteSparsity* sparsity = nullptr, + size_t buffer_identifier = kTfLiteNoBufferIdentifier); // Set description of inputs/outputs/data/fptrs for node `node_index`. // This variant assumes an external buffer has been allocated of size @@ -589,6 +592,10 @@ class Subgraph { // Returns true if the subgraph has been fully delegated. bool IsFullyDelegated() const; + const std::unordered_map& GetTensorBufferIdentifiers() { + return tensor_buffer_identifiers_; + } + private: #ifndef DOXYGEN_SKIP friend class tflite::impl::InterpreterBuilder; @@ -1153,6 +1160,10 @@ class Subgraph { /// The allocator used for holding memory of the model. Note that this will /// be null if the client provides a tflite::Model directly. const Allocation* allocation_ = nullptr; + + // Maps tensor constant buffers used in the subgraph to a model-wide + // identifiers. 
+ std::unordered_map tensor_buffer_identifiers_; }; } // namespace tflite diff --git a/tensorflow/lite/delegates/gpu/cl/cl_device.cc b/tensorflow/lite/delegates/gpu/cl/cl_device.cc index 852cc95eedf261..4f3ea93b1cf792 100644 --- a/tensorflow/lite/delegates/gpu/cl/cl_device.cc +++ b/tensorflow/lite/delegates/gpu/cl/cl_device.cc @@ -344,21 +344,20 @@ GpuInfo GpuInfoFromDeviceID(cl_device_id id, cl_platform_id platform_id) { } } - if (info.IsIntel()) { - if (info.SupportsExtension("cl_intel_required_subgroup_size")) { - size_t sub_groups_count; - cl_int status = - clGetDeviceInfo(id, 0x4108 /*CL_DEVICE_SUB_GROUP_SIZES_INTEL*/, 0, - nullptr, &sub_groups_count); + if (info.SupportsExtension("cl_intel_required_subgroup_size")) { + size_t sub_groups_ret_size; + cl_int status = + clGetDeviceInfo(id, 0x4108 /*CL_DEVICE_SUB_GROUP_SIZES_INTEL*/, 0, + nullptr, &sub_groups_ret_size); + if (status == CL_SUCCESS) { + size_t sub_groups_count = sub_groups_ret_size / sizeof(size_t); + std::vector sub_group_sizes(sub_groups_count); + status = + clGetDeviceInfo(id, 0x4108 /*CL_DEVICE_SUB_GROUP_SIZES_INTEL*/, + sub_groups_ret_size, sub_group_sizes.data(), nullptr); if (status == CL_SUCCESS) { - std::vector sub_group_sizes(sub_groups_count); - status = clGetDeviceInfo(id, 0x4108 /*CL_DEVICE_SUB_GROUP_SIZES_INTEL*/, - sizeof(size_t) * sub_groups_count, - sub_group_sizes.data(), nullptr); - if (status == CL_SUCCESS) { - for (int i = 0; i < sub_groups_count; ++i) { - info.supported_subgroup_sizes.push_back(sub_group_sizes[i]); - } + for (int i = 0; i < sub_groups_count; ++i) { + info.supported_subgroup_sizes.push_back(sub_group_sizes[i]); } } } diff --git a/tensorflow/lite/delegates/gpu/common/BUILD b/tensorflow/lite/delegates/gpu/common/BUILD index 0a55ad05a76968..195124f269c7c8 100644 --- a/tensorflow/lite/delegates/gpu/common/BUILD +++ b/tensorflow/lite/delegates/gpu/common/BUILD @@ -44,6 +44,7 @@ cc_library( deps = [ ":data_type", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/status", "@com_google_absl//absl/strings", ], ) diff --git a/tensorflow/lite/delegates/gpu/common/data_type_test.cc b/tensorflow/lite/delegates/gpu/common/data_type_test.cc index a88eb38047eda8..f7ee35c73145dd 100644 --- a/tensorflow/lite/delegates/gpu/common/data_type_test.cc +++ b/tensorflow/lite/delegates/gpu/common/data_type_test.cc @@ -15,7 +15,6 @@ limitations under the License. #include "tensorflow/lite/delegates/gpu/common/data_type.h" -#include #include namespace tflite { diff --git a/tensorflow/lite/delegates/gpu/common/flops_util.cc b/tensorflow/lite/delegates/gpu/common/flops_util.cc index 88d7edad88fc57..3807f6b6a46274 100644 --- a/tensorflow/lite/delegates/gpu/common/flops_util.cc +++ b/tensorflow/lite/delegates/gpu/common/flops_util.cc @@ -15,6 +15,8 @@ limitations under the License. #include "tensorflow/lite/delegates/gpu/common/flops_util.h" +#include "tensorflow/lite/delegates/gpu/common/shape.h" + namespace tflite { namespace gpu { diff --git a/tensorflow/lite/delegates/gpu/common/gpu_info.cc b/tensorflow/lite/delegates/gpu/common/gpu_info.cc index 944202cb3d8561..2627adda13c6bd 100644 --- a/tensorflow/lite/delegates/gpu/common/gpu_info.cc +++ b/tensorflow/lite/delegates/gpu/common/gpu_info.cc @@ -21,6 +21,7 @@ limitations under the License. 
#include #include +#include "absl/status/status.h" #include "absl/strings/ascii.h" namespace tflite { @@ -981,6 +982,17 @@ bool GpuInfo::SupportsSubGroupWithSize(int sub_group_size) const { return false; } +absl::Status GpuInfo::GetMinSubGroupSize(int& min_sub_group_size) const { + auto begin = supported_subgroup_sizes.begin(); + auto end = supported_subgroup_sizes.end(); + auto min = std::min_element(begin, end); + if (min == end) { + return absl::InternalError("No supported subgroup sizes"); + } + min_sub_group_size = *min; + return absl::OkStatus(); +} + bool GpuInfo::SupportsFloatImage2D(DataType data_type, int channels) const { if (IsApiOpenCl()) { return opencl_info.supported_images_2d.SupportsImage2D(data_type, channels); diff --git a/tensorflow/lite/delegates/gpu/common/gpu_info.h b/tensorflow/lite/delegates/gpu/common/gpu_info.h index 15bdb065f15361..9849e2405d32e2 100644 --- a/tensorflow/lite/delegates/gpu/common/gpu_info.h +++ b/tensorflow/lite/delegates/gpu/common/gpu_info.h @@ -21,6 +21,7 @@ limitations under the License. #include #include "absl/container/flat_hash_set.h" +#include "absl/status/status.h" #include "absl/strings/match.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" @@ -524,6 +525,7 @@ struct GpuInfo { // returns true if device have fixed wave size equal to 32 bool IsWaveSizeEqualTo32() const; bool SupportsSubGroupWithSize(int sub_group_size) const; + absl::Status GetMinSubGroupSize(int& min_sub_group_size) const; bool SupportsFloatImage2D(DataType data_type, int channels) const; bool SupportsExtension(const std::string& extension) const; diff --git a/tensorflow/lite/delegates/gpu/common/tasks/BUILD b/tensorflow/lite/delegates/gpu/common/tasks/BUILD index 01ad443db54ae2..0b9662471277e4 100644 --- a/tensorflow/lite/delegates/gpu/common/tasks/BUILD +++ b/tensorflow/lite/delegates/gpu/common/tasks/BUILD @@ -143,6 +143,7 @@ cc_library( hdrs = ["conv_generic.h"], deps = [ "//tensorflow/lite/delegates/gpu/common:data_type", + "//tensorflow/lite/delegates/gpu/common:gpu_info", "//tensorflow/lite/delegates/gpu/common:operations", "//tensorflow/lite/delegates/gpu/common:shape", "//tensorflow/lite/delegates/gpu/common:status", diff --git a/tensorflow/lite/delegates/gpu/common/tasks/conv_generic.cc b/tensorflow/lite/delegates/gpu/common/tasks/conv_generic.cc index c2a1bd8f539512..1e5cc2f14b9e82 100644 --- a/tensorflow/lite/delegates/gpu/common/tasks/conv_generic.cc +++ b/tensorflow/lite/delegates/gpu/common/tasks/conv_generic.cc @@ -1791,16 +1791,20 @@ ConvGeneric::ConvParams ConvGeneric::GuessBestParams( const int kSubGroupSize = 16; const bool supports_subgroup_size_control = gpu_info.SupportsExtension("cl_intel_required_subgroup_size"); + int min_subgroup_size; + auto min_subgroup_size_status = + gpu_info.GetMinSubGroupSize(min_subgroup_size); if (supports_subgroup_size_control && gpu_info.SupportsSubGroupWithSize(kSubGroupSize)) { conv_params.weights_upload_type = WeightsUploadType::PRIVATE_MEM_SIMD_BROADCAST; conv_params.simd_size = kSubGroupSize; - } else if (gpu_info.opencl_info.IsCLVK()) { - // It will work because of specific driver using subgroup size 16 + } else if (supports_subgroup_size_control && + min_subgroup_size_status.ok()) { conv_params.weights_upload_type = WeightsUploadType::PRIVATE_MEM_SIMD_BROADCAST; - conv_params.simd_size = 16; + conv_params.simd_size = min_subgroup_size; + work_group_size_ = int3(min_subgroup_size, 1, 1); } else { // no support of subgroup size control // only smallest subgroup size (8) can be used safely, otherwise 
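Note on the conv_generic.cc hunk above: the CLVK-specific branch that hard-coded simd_size = 16 is replaced by one that, when the device supports cl_intel_required_subgroup_size but not a subgroup size of 16, uses the smallest subgroup size the device reports (via the new GpuInfo::GetMinSubGroupSize) and sizes the work group to match. A minimal standalone sketch of that selection, using a hypothetical FakeGpuInfo stand-in rather than the real GpuInfo:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-in for tflite::gpu::GpuInfo (illustration only).
    struct FakeGpuInfo {
      std::vector<int> supported_subgroup_sizes;
    };

    // Mirrors the intent of GpuInfo::GetMinSubGroupSize: fails when the device
    // reported no subgroup sizes, otherwise returns the smallest one.
    bool GetMinSubGroupSize(const FakeGpuInfo& info, int& min_size) {
      const auto it = std::min_element(info.supported_subgroup_sizes.begin(),
                                       info.supported_subgroup_sizes.end());
      if (it == info.supported_subgroup_sizes.end()) return false;
      min_size = *it;
      return true;
    }

    int main() {
      const FakeGpuInfo info{{32, 8, 16}};
      // The patch keeps 8 as the conservative value when nothing better is known.
      int simd_size = 8;
      if (int min_size = 0; GetMinSubGroupSize(info, min_size)) {
        simd_size = min_size;  // the patch also sets work_group_size_ from this
      }
      std::cout << "simd_size = " << simd_size << "\n";  // prints "simd_size = 8"
    }
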
diff --git a/tensorflow/lite/delegates/xnnpack/BUILD b/tensorflow/lite/delegates/xnnpack/BUILD index c4f748280d70ec..46c327528076e8 100644 --- a/tensorflow/lite/delegates/xnnpack/BUILD +++ b/tensorflow/lite/delegates/xnnpack/BUILD @@ -1,3 +1,4 @@ +load("@flatbuffers//:build_defs.bzl", "flatbuffer_cc_library") load("//tensorflow:tensorflow.default.bzl", "get_compatible_with_portable") load("//tensorflow/lite:build_def.bzl", "tflite_copts") load("//tensorflow/lite:special_rules.bzl", "internal_visibility_allowlist", "tflite_portable_test_suite_combined") @@ -214,15 +215,6 @@ cc_library( }), ) -cc_library( - name = "xnnpack_delegate_test", - testonly = True, - hdrs = ["xnnpack_delegate_test.h"], - deps = [ - ":xnnpack_delegate", - ], -) - cc_library( name = "xnnpack_delegate", srcs = ["xnnpack_delegate.cc"], @@ -251,7 +243,9 @@ cc_library( ":tflite_with_xnnpack_qs8", ":tflite_with_xnnpack_qu8", ":tflite_with_xnnpack_transient_indirection_buffer", + ":weight_cache", "//tensorflow/lite:kernel_api", + "//tensorflow/lite:logger", "//tensorflow/lite:minimal_logging", "//tensorflow/lite/c:c_api_types", "//tensorflow/lite/core:subgraph", @@ -289,6 +283,7 @@ cc_library( linkstatic = True, deps = [ ":quantization_util", + ":weight_cache", "//tensorflow/lite:kernel_api", "//tensorflow/lite:minimal_logging", "//tensorflow/lite/c:c_api_types", @@ -323,6 +318,30 @@ cc_library( ], ) +flatbuffer_cc_library( + name = "weight_cache_schema", + srcs = ["weight_cache_schema.fbs"], + compatible_with = get_compatible_with_portable(), + flatc_args = [ + "--gen-mutable", + "--gen-object-api", + ], +) + +cc_library( + name = "weight_cache", + srcs = ["weight_cache.cc"], + hdrs = ["weight_cache.h"], + compatible_with = get_compatible_with_portable(), + deps = [ + ":weight_cache_schema", + "//tensorflow/lite:minimal_logging", + "//tensorflow/lite/c:common", + "@XNNPACK", + "@flatbuffers//:runtime_cc", + ], +) + ################################ Tester classes ################################ cc_library( @@ -2828,4 +2847,21 @@ cc_test( ], ) +cc_test( + name = "weight_cache_test", + srcs = ["weight_cache_test.cc"], + tags = [ + "tflite_disable_mobile_test", # TODO - b/341104412 - enable on Android + ], + deps = [ + ":test_main", + ":weight_cache", + ":weight_cache_schema", + "//tensorflow/lite/c:common", + "@XNNPACK", + "@com_google_googletest//:gtest", + "@flatbuffers//:runtime_cc", + ], +) + tflite_portable_test_suite_combined(combine_conditions = {"deps": [":test_main"]}) diff --git a/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_test.cc b/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_test.cc index 3b83541b81b215..de863e4f1e2125 100644 --- a/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_test.cc +++ b/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_test.cc @@ -27,11 +27,8 @@ namespace tflite { namespace xnnpack { TEST(DynamicallyQuantizedTransposeConvTest, 2x2Stride2) { - auto delegate_options = TfLiteXNNPackDelegateOptionsDefault(); - delegate_options.flags |= - TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING; std::unique_ptr - xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options), + xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; @@ -55,11 +52,8 @@ TEST(DynamicallyQuantizedTransposeConvTest, 2x2Stride2) { } TEST(DynamicallyQuantizedTransposeConvTest, 3x3Stride2) { - auto delegate_options = TfLiteXNNPackDelegateOptionsDefault(); - 
delegate_options.flags |= - TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING; std::unique_ptr - xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options), + xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; auto rng = std::mt19937(random_device()); @@ -82,11 +76,8 @@ TEST(DynamicallyQuantizedTransposeConvTest, 3x3Stride2) { } TEST(DynamicallyQuantizedTransposeConvTest, 4x4Stride2) { - auto delegate_options = TfLiteXNNPackDelegateOptionsDefault(); - delegate_options.flags |= - TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING; std::unique_ptr - xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options), + xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; @@ -110,11 +101,8 @@ TEST(DynamicallyQuantizedTransposeConvTest, 4x4Stride2) { } TEST(DynamicallyQuantizedTransposeConvTest, 4x4Stride4) { - auto delegate_options = TfLiteXNNPackDelegateOptionsDefault(); - delegate_options.flags |= - TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING; std::unique_ptr - xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options), + xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; @@ -138,11 +126,8 @@ TEST(DynamicallyQuantizedTransposeConvTest, 4x4Stride4) { } TEST(DynamicallyQuantizedTransposeConvTest, SmallKernelWithSamePadding) { - auto delegate_options = TfLiteXNNPackDelegateOptionsDefault(); - delegate_options.flags |= - TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING; std::unique_ptr - xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options), + xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; @@ -169,11 +154,8 @@ TEST(DynamicallyQuantizedTransposeConvTest, SmallKernelWithSamePadding) { } TEST(DynamicallyQuantizedTransposeConvTest, SmallKernelWithValidPadding) { - auto delegate_options = TfLiteXNNPackDelegateOptionsDefault(); - delegate_options.flags |= - TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING; std::unique_ptr - xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options), + xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; auto rng = std::mt19937(random_device()); @@ -199,11 +181,8 @@ TEST(DynamicallyQuantizedTransposeConvTest, SmallKernelWithValidPadding) { } TEST(DynamicallyQuantizedTransposeConvTest, StrideWithSamePadding) { - auto delegate_options = TfLiteXNNPackDelegateOptionsDefault(); - delegate_options.flags |= - TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING; std::unique_ptr - xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options), + xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; @@ -234,11 +213,8 @@ TEST(DynamicallyQuantizedTransposeConvTest, StrideWithSamePadding) { } TEST(DynamicallyQuantizedTransposeConvTest, StrideWithValidPadding) { - auto delegate_options = TfLiteXNNPackDelegateOptionsDefault(); - delegate_options.flags |= - TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING; std::unique_ptr - xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options), + xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; @@ -272,8 +248,6 @@ TEST(DynamicallyQuantizedTransposeConvTest, MultiThreading) { TfLiteXNNPackDelegateOptions 
delegate_options = TfLiteXNNPackDelegateOptionsDefault(); delegate_options.num_threads = 2; - delegate_options.flags |= - TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING; std::unique_ptr xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options), TfLiteXNNPackDelegateDelete); @@ -308,8 +282,6 @@ TEST(DynamicallyQuantizedTransposeConvTest, MultiThreading) { TEST(DynamicallyQuantizedTransposeConvTest, WeightsCache) { TfLiteXNNPackDelegateOptions delegate_options = TfLiteXNNPackDelegateOptionsDefault(); - delegate_options.flags |= - TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING; std::unique_ptr weights_cache(TfLiteXNNPackDelegateWeightsCacheCreate(), diff --git a/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_tester.cc b/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_tester.cc index bbc7110920ae08..35c1b590c7396a 100644 --- a/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_tester.cc +++ b/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_tester.cc @@ -49,23 +49,19 @@ void DynamicallyQuantizedTransposeConvTester::Test( const std::vector kernel_data = GenerateKernelData(); const std::vector bias_data = GenerateBiasData(); const std::vector kernel_scale_data = GenerateKernelScaleData(); - std::vector drq_buffer = - CreateDRQTfLiteModel(kernel_data, bias_data, kernel_scale_data); - std::vector dequantize_buffer = - CreateDequantizeTfLiteModel(kernel_data, bias_data, kernel_scale_data); - const Model* drq_model = GetModel(drq_buffer.data()); - const Model* dequantize_model = GetModel(dequantize_buffer.data()); + std::vector buffer = + CreateTfLiteModel(kernel_data, bias_data, kernel_scale_data); + const Model* model = GetModel(buffer.data()); std::unique_ptr delegate_interpreter; - ASSERT_EQ( - InterpreterBuilder( - drq_model, ::tflite::ops::builtin::BuiltinOpResolverWithXNNPACK())( - &delegate_interpreter), - kTfLiteOk); + ASSERT_EQ(InterpreterBuilder( + model, ::tflite::ops::builtin::BuiltinOpResolverWithXNNPACK())( + &delegate_interpreter), + kTfLiteOk); std::unique_ptr default_interpreter; ASSERT_EQ( InterpreterBuilder( - dequantize_model, + model, ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())( &default_interpreter), kTfLiteOk); @@ -79,11 +75,11 @@ void DynamicallyQuantizedTransposeConvTester::Test( ASSERT_EQ(delegate_interpreter->outputs().size(), 1); ASSERT_EQ(default_interpreter->outputs().size(), 1); - ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk); - ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk); ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk); + ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk); + if (weights_cache_ != nullptr) { TfLiteXNNPackDelegateWeightsCacheFinalizeHard(weights_cache_); } @@ -110,12 +106,11 @@ void DynamicallyQuantizedTransposeConvTester::Test( int different_output_values = 0; for (size_t i = 0; i < num_output_values; i++) { if (std::abs(default_output_data[i] - xnnpack_output_data[i]) > - 0.1 * std::abs(default_output_data[i])) { + 0.005 * std::abs(default_output_data[i])) { ++different_output_values; } } - - if (different_output_values > 0.055 * num_output_values) { + if (different_output_values > 0.05 * num_output_values) { GTEST_FAIL() << (float)different_output_values / num_output_values * 100.f << "% of output values differ"; } @@ -182,7 +177,7 @@ DynamicallyQuantizedTransposeConvTester::GenerateKernelScaleData() const { return kernel_scale; 
} -std::vector DynamicallyQuantizedTransposeConvTester::CreateDRQTfLiteModel( +std::vector DynamicallyQuantizedTransposeConvTester::CreateTfLiteModel( const std::vector& filter_data, const std::vector& bias_data, const std::vector& kernel_scale) const { /*************************** Define operator codes **************************/ @@ -297,147 +292,5 @@ std::vector DynamicallyQuantizedTransposeConvTester::CreateDRQTfLiteModel( builder.GetBufferPointer() + builder.GetSize()); } -std::vector -DynamicallyQuantizedTransposeConvTester::CreateDequantizeTfLiteModel( - const std::vector& filter_data, const std::vector& bias_data, - const std::vector& kernel_scale) const { - /*************************** Define operator codes **************************/ - flatbuffers::FlatBufferBuilder builder; - std::vector> operator_codes{ - {CreateOperatorCode(builder, BuiltinOperator_TRANSPOSE_CONV)}}; - const int dequantize_operator_code = operator_codes.size(); - operator_codes.emplace_back( - CreateOperatorCode(builder, BuiltinOperator_DEQUANTIZE)); - - /****************************** Define buffers ******************************/ - std::vector> buffers{ - {CreateBuffer(builder, builder.CreateVector({}))}}; - - int filter_buffer_id = 0; - const int quantized_filter_buffer_id = buffers.size(); - buffers.emplace_back(CreateBuffer( - builder, - builder.CreateVector(reinterpret_cast(filter_data.data()), - sizeof(int8_t) * filter_data.size()))); - - int bias_buffer_id = buffers.size(); - buffers.emplace_back(CreateBuffer( - builder, - builder.CreateVector(reinterpret_cast(bias_data.data()), - sizeof(float) * bias_data.size()))); - const std::array output_shape{ - {BatchSize(), OutputHeight(), OutputWidth(), OutputChannels()}}; - const int output_shape_buffer_id = buffers.size(); - buffers.emplace_back(CreateBuffer( - builder, builder.CreateVector( - reinterpret_cast(output_shape.data()), - sizeof(int32_t) * output_shape.size()))); - - /****************************** Define tensors ******************************/ - const std::vector filter_shape = {OutputChannels(), KernelHeight(), - KernelWidth(), InputChannels()}; - const std::vector bias_shape = {OutputChannels()}; - const std::array input_shape{ - {BatchSize(), InputHeight(), InputWidth(), InputChannels()}}; - - std::vector> tensors; - const int quantized_filter_tensor_id = tensors.size(); - tensors.emplace_back(CreateTensor( - builder, - builder.CreateVector(filter_shape.data(), filter_shape.size()), - /*type=*/TensorType_INT8, - /*buffer=*/quantized_filter_buffer_id, - /*name=*/0, - CreateQuantizationParameters( - builder, /*min=*/0, /*max=*/0, - builder.CreateVector(kernel_scale), - builder.CreateVector( - std::vector(OutputChannels(), 0))))); - - const int input_tensor_id = tensors.size(); - tensors.emplace_back(CreateTensor( - builder, - builder.CreateVector(input_shape.data(), input_shape.size()), - TensorType_FLOAT32)); - - const int filter_tensor_id = tensors.size(); - tensors.emplace_back(CreateTensor( - builder, - builder.CreateVector(filter_shape.data(), filter_shape.size()), - TensorType_FLOAT32, - /*buffer=*/filter_buffer_id)); - - const int bias_tensor_id = tensors.size(); - tensors.emplace_back(CreateTensor( - builder, - builder.CreateVector(bias_shape.data(), bias_shape.size()), - TensorType_FLOAT32, bias_buffer_id)); - - const int output_tensor_id = tensors.size(); - tensors.emplace_back(CreateTensor( - builder, - builder.CreateVector(output_shape.data(), output_shape.size()), - TensorType_FLOAT32)); - - const int output_shape_tensor_id = 
tensors.size(); - const std::array output_shape_shape{{4}}; - tensors.emplace_back( - CreateTensor(builder, - builder.CreateVector(output_shape_shape.data(), - output_shape_shape.size()), - TensorType_INT32, output_shape_buffer_id)); - - /***************************** Define operators *****************************/ - std::vector> operators; - - const std::array dequantize_filter_inputs{ - {quantized_filter_tensor_id}}; - const std::array dequantize_filter_outputs{{filter_tensor_id}}; - operators.emplace_back(CreateOperator( - builder, /*opcode_index=*/dequantize_operator_code, - builder.CreateVector(dequantize_filter_inputs.data(), - dequantize_filter_inputs.size()), - builder.CreateVector(dequantize_filter_outputs.data(), - dequantize_filter_outputs.size()))); - - std::vector op_inputs{ - {output_shape_tensor_id, filter_tensor_id, input_tensor_id}}; - op_inputs.push_back(bias_tensor_id); - const std::array op_outputs{{output_tensor_id}}; - const flatbuffers::Offset transpose_conv_options = - CreateTransposeConvOptions(builder, Padding(), StrideWidth(), - StrideHeight()); - operators.emplace_back(CreateOperator( - builder, /*opcode_index=*/0, - builder.CreateVector(op_inputs.data(), op_inputs.size()), - builder.CreateVector(op_outputs.data(), op_outputs.size()), - BuiltinOptions_TransposeConvOptions, transpose_conv_options.Union())); - - /****************************** Define subgraph *****************************/ - const std::array subgraph_inputs{{input_tensor_id}}; - const std::array subgraph_outputs{{output_tensor_id}}; - const flatbuffers::Offset subgraph = CreateSubGraph( - builder, builder.CreateVector(tensors.data(), tensors.size()), - builder.CreateVector(subgraph_inputs.data(), - subgraph_inputs.size()), - builder.CreateVector(subgraph_outputs.data(), - subgraph_outputs.size()), - builder.CreateVector(operators.data(), operators.size())); - - const flatbuffers::Offset description = - builder.CreateString("TransposeConv model"); - - const flatbuffers::Offset model_buffer = CreateModel( - builder, TFLITE_SCHEMA_VERSION, - builder.CreateVector(operator_codes.data(), operator_codes.size()), - builder.CreateVector(&subgraph, 1), description, - builder.CreateVector(buffers.data(), buffers.size())); - - builder.Finish(model_buffer); - - return std::vector(builder.GetBufferPointer(), - builder.GetBufferPointer() + builder.GetSize()); -} - } // namespace xnnpack } // namespace tflite diff --git a/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_tester.h b/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_tester.h index e7dc7cd9536608..5c69808c836b33 100644 --- a/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_tester.h +++ b/tensorflow/lite/delegates/xnnpack/dynamically_quantized_transpose_conv_tester.h @@ -183,11 +183,7 @@ class DynamicallyQuantizedTransposeConvTester { std::vector GenerateKernelData() const; std::vector GenerateBiasData() const; std::vector GenerateKernelScaleData() const; - std::vector CreateDRQTfLiteModel( - const std::vector& filter_data, - const std::vector& bias_data, - const std::vector& kernel_scale) const; - std::vector CreateDequantizeTfLiteModel( + std::vector CreateTfLiteModel( const std::vector& filter_data, const std::vector& bias_data, const std::vector& kernel_scale) const; diff --git a/tensorflow/lite/delegates/xnnpack/weight_cache.cc b/tensorflow/lite/delegates/xnnpack/weight_cache.cc new file mode 100644 index 00000000000000..aa7307386ff2cd --- /dev/null +++ 
b/tensorflow/lite/delegates/xnnpack/weight_cache.cc @@ -0,0 +1,523 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/delegates/xnnpack/weight_cache.h" + +#include +#include + +#if defined(_MSC_VER) +#include +#define F_OK 0 +#else +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "xnnpack.h" // from @XNNPACK +#include "flatbuffers/base.h" // from @flatbuffers +#include "flatbuffers/flatbuffer_builder.h" // from @flatbuffers +#include "flatbuffers/verifier.h" // from @flatbuffers +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h" +#include "tensorflow/lite/logger.h" +#include "tensorflow/lite/minimal_logging.h" + +#define XNNPACK_ABORT_CHECK(TEST, ...) \ + if (!(TEST)) { \ + TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, __VA_ARGS__); \ + std::abort(); \ + } + +namespace tflite::xnnpack { + +namespace { +constexpr size_t kMinAlignment = 64; + +template +class ScopeGuard { + public: + explicit ScopeGuard(F&& callback) : callback_(std::forward(callback)) {} + ~ScopeGuard() { + if (active_) { + callback_(); + } + } + + void Deactivate() { active_ = false; } + + private: + F callback_; + bool active_ = true; +}; + +template +ScopeGuard(F&&) -> ScopeGuard; + +// Returns true if the given path exists. 
+[[nodiscard]] +bool FileExists(const char* path) { + return access(path, F_OK) != -1; +} + +} // namespace + +void swap(MMapHandle& a, MMapHandle& b) { + using std::swap; + swap(a.size_, b.size_); + swap(a.data_, b.data_); +} + +MMapHandle::~MMapHandle() { UnMap(); } + +MMapHandle::MMapHandle(MMapHandle&& other) { swap(*this, other); } + +MMapHandle& MMapHandle::operator=(MMapHandle&& other) { + swap(*this, other); + return *this; +} + +bool MMapHandle::Map(const char* path) { + this->UnMap(); + + const int fd = open(path, O_RDONLY); + if (fd == -1) { + TFLITE_LOG_PROD( + tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: could not open file to mmap ('%s'): %s.", path, + strerror(errno)) + return false; + } + + const ScopeGuard close_fd_on_return([&fd] { + if (fd >= 0) { + close(fd); + } + }); + + struct stat file_stats; + if (fstat(fd, &file_stats)) { + TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: could not access file stats to get " + "size ('%s'): %s.", + path, strerror(errno)) + return false; + } + + size_ = file_stats.st_size; +#if defined(_MSC_VER) + data_ = new uint8_t[size_]; + { + uint8_t* data_reader = data_; + size_t remaining_bytes = size_; + while (remaining_bytes > 0) { + const auto bytes = read(fd, data_reader, remaining_bytes); + if (bytes == -1) { + TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: could not read file ('%s'): %s.", + path, strerror(errno)) + UnMap(); + return false; + } + remaining_bytes -= bytes; + data_reader += bytes; + } + } +#else + data_ = static_cast( + mmap(/*addr=*/nullptr, size_, PROT_READ, MAP_SHARED, fd, /*offset=*/0)); + if (data_ == MAP_FAILED) { + TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: could not mmap file (%s): %s.", path, + strerror(errno)) + data_ = nullptr; + size_ = 0; + return false; + } +#endif + + return true; +} + +void MMapHandle::UnMap() { + if (data_) { +#if defined(_MSC_VER) + delete[] data_; +#else + munmap(data_, size_); +#endif + data_ = nullptr; + size_ = 0; + } +} + +void* WeightCacheBuilder::Reserve(size_t size) { + size_t offset = buffer_data_.size(); + const size_t misalign = offset % kMinAlignment; + if (misalign) { + size += kMinAlignment - misalign; + offset += kMinAlignment - misalign; + } + buffer_data_.resize(buffer_data_.size() + size); + return buffer_data_.data() + offset; +} + +bool WeightCacheBuilder::SpanIsWithinBuffer(const void* ptr, + uint64_t size) const { + const uintptr_t buf_begin = reinterpret_cast(buffer_data_.data()); + const uintptr_t buf_end = buf_begin + buffer_data_.size(); + const uintptr_t ptr_begin = reinterpret_cast(ptr); + const uintptr_t ptr_end = ptr_begin + size; + return ptr_begin >= buf_begin && ptr_begin <= buf_end && + ptr_end >= buf_begin && ptr_end <= buf_end; +} + +BufferLocation WeightCacheBuilder::Append(PackIdentifier pack_id, + const void* data, uint64_t size) { + const void* append_data = data; + if (!SpanIsWithinBuffer(data, size)) { + void* reserved_data = Reserve(size); + std::memcpy(reserved_data, data, size); + append_data = reserved_data; + } + BufferLocation loc{.offset = reinterpret_cast(append_data) - + reinterpret_cast(buffer_data_.data()), + .size = size}; + schema_.buffers.push_back(std::make_unique( + cache::schema::BufferT{.packing_algorithm_id = pack_id.pack_algorithm_id, + .weights_id = pack_id.weights_id, + .bias_id = pack_id.bias_id, + .offset = loc.offset, + .size = loc.size})); + return loc; +} + +bool WeightCacheBuilder::ShouldWrite() const { return !buffer_data_.empty(); } + +namespace { 
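+// Helpers for writing the cache file. `Write()` below lays the file out as a
+// flatbuffer header (see weight_cache_schema.fbs), padding up to
+// kMinAlignment, then the raw packed buffer data; the buffer offsets stored
+// in the header are relative to `base_offset` so mmapped buffers stay
+// correctly aligned.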
+ +bool WriteData(const int fd, const uint8_t* data, size_t size, + const char* const file_path, const char* step_description) { + for (size_t bytes = 0; bytes < size;) { + const auto written_bytes = write(fd, data + bytes, size - bytes); + if (written_bytes == -1) { + TFLITE_LOG_PROD( + tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: file write incomplete (%s). %s: %s.", + file_path, step_description, strerror(errno)) + } + bytes += written_bytes; + } + + return true; +} + +} // namespace + +bool WeightCacheBuilder::Write(const char* path) { + const int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); + if (fd == -1) { + TFLITE_LOG_PROD( + tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: could not open cache file ('%s') for " + "writing: %s.", + path, strerror(errno)) + return false; + } + + const ScopeGuard close_fd_on_return([&fd] { + if (fd >= 0) { + close(fd); + } + }); + + flatbuffers::FlatBufferBuilder builder; + // Add a fake size and the base offset to mutate them afterwards. Otherwise + // space for it won't be added to the flatbuffer. + schema_.flatbuffer_size = 1; + schema_.base_offset = 1; + FinishPackedWeightsBuffer( + builder, cache::schema::PackedWeights::Pack(builder, &schema_)); + + // Mutate the flatbuffer size and base offset fields. + auto* mutable_packed_weights = + cache::schema::GetMutablePackedWeights(builder.GetBufferPointer()); + mutable_packed_weights->mutate_flatbuffer_size(builder.GetSize()); + const size_t misalign = builder.GetSize() % kMinAlignment; + const size_t alignment_offset = misalign ? kMinAlignment - misalign : 0; + mutable_packed_weights->mutate_base_offset(builder.GetSize() + + alignment_offset); + + // Write the flatbuffer which serves as a header to index the following + // data. + if (!WriteData(fd, builder.GetBufferPointer(), builder.GetSize(), path, + "Header")) { + return false; + } + // Add some padding so that the cache file can be mmaped and the buffers + // stay aligned correctly. + const uint8_t fill[kMinAlignment] = {0}; + if (!WriteData(fd, fill, alignment_offset, path, "Alignment padding")) { + return false; + } + // Write the actual buffer data. + if (!WriteData(fd, buffer_data_.data(), buffer_data_.size(), path, + "Buffer data")) { + return false; + } + TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO, + "XNNPack weight cache: written to '%s'.", path); + return true; +} + +MMapWeightCacheProvider::MMapWeightCacheProvider( + MMapWeightCacheProvider&& other) { + *this = std::move(other); +} + +MMapWeightCacheProvider& MMapWeightCacheProvider::operator=( + MMapWeightCacheProvider&& other) { + using std::swap; + swap(cache_provider_, other.cache_provider_); + // The contexts need to keep pointing to their owning object. + cache_provider_.context = this; + other.cache_provider_.context = &other; + swap(file_path_, other.file_path_); + swap(buffer_address_to_identifier_, other.buffer_address_to_identifier_); + swap(cache_key_to_offset_, other.cache_key_to_offset_); + swap(mmap_handle_, other.mmap_handle_); + swap(mmap_buffer_base_offset_, other.mmap_buffer_base_offset_); + swap(builder_, other.builder_); + return *this; +} + +void MMapWeightCacheProvider::SetFilePath(const char* path) { + XNNPACK_ABORT_CHECK( + !IsFinalized(), + "Cannot change the path of a cache that has already been loaded."); + // We try to keep file_path_'s data as stable as possible. Don't overwrite + // if the path hasn't changed. 
+ if (file_path_ != path) { + file_path_ = path; + } +} + +bool MMapWeightCacheProvider::Load(const std::string& path) { + SetFilePath(path.c_str()); + return Load(); +} + +bool MMapWeightCacheProvider::Load() { + XNNPACK_ABORT_CHECK(!file_path_.empty(), + "Path wasn't provided to weight cache provider."); + mmap_buffer_base_offset_ = 0; + cache_key_to_offset_.clear(); + + if (!FileExists(file_path_.c_str())) { + TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, + "XNNPack weight cache: could not load '%s': %s.", + file_path_.c_str(), strerror(errno)); + return false; + } + + if (!mmap_handle_.Map(file_path_.c_str())) { + return false; + } + + // Verifiy the flabuffer part of the file. + const size_t verifier_size = + std::min(mmap_handle_.size(), + static_cast(FLATBUFFERS_MAX_BUFFER_SIZE - 1)); + flatbuffers::Verifier verifier(mmap_handle_.data(), verifier_size); + if (!cache::schema::VerifyPackedWeightsBuffer(verifier)) { + TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: header validation failed."); + return false; + } + + // Load flatbuffer. + const cache::schema::PackedWeights* packed_weights = + cache::schema::GetPackedWeights(mmap_handle_.data()); + if (!packed_weights) { + TFLITE_LOG_PROD( + tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: could not get packed weights from flatbuffer."); + return false; + } + mmap_buffer_base_offset_ = packed_weights->base_offset(); + if (const auto buffers = packed_weights->buffers(); buffers) { + for (auto* buffer : *buffers) { + if (!buffer) { + TFLITE_LOG_PROD( + tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: Invalid buffer address in buffer list."); + return false; + } + cache_key_to_offset_.emplace( + PackIdentifier{.pack_algorithm_id = buffer->packing_algorithm_id(), + .weights_id = buffer->weights_id(), + .bias_id = buffer->bias_id()}, + BufferLocation{.offset = buffer->offset(), .size = buffer->size()}); + } + } + return true; +} + +void MMapWeightCacheProvider::MapTensorIdentifiers( + const TfLiteTensor* tensors, const size_t size, + const std::unordered_map& tensor_index_to_identifier) { + for (const auto [index, identifier] : tensor_index_to_identifier) { + XNNPACK_ABORT_CHECK(index < size, + "Tensor index corresponds to a non existing tensor."); + buffer_address_to_identifier_[tensors[index].data.data] = identifier; + } +} + +size_t MMapWeightCacheProvider::LookUp( + const xnn_weights_cache_look_up_key* cache_key) { + if (!cache_key) { + TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: a null cache key was provided."); + return SIZE_MAX; + } + const PackIdentifier pack_id = BuildPackIdentifier(*cache_key); + if (auto offset_it = cache_key_to_offset_.find(pack_id); + offset_it != cache_key_to_offset_.end()) { + return offset_it->second.offset; + } + return SIZE_MAX; +} + +void* MMapWeightCacheProvider::ReserveSpace(size_t size) { + XNNPACK_ABORT_CHECK(!IsFinalized(), + "Cannot reserve space in a finalized cache."); + return builder_.Reserve(size); +} + +size_t MMapWeightCacheProvider::LookUpOrInsert( + const xnn_weights_cache_look_up_key* cache_key, void* ptr, size_t size) { + XNNPACK_ABORT_CHECK(cache_key, "A null cache key was provided."); + + const PackIdentifier pack_id = BuildPackIdentifier(*cache_key); + if (auto offset_it = cache_key_to_offset_.find(pack_id); + offset_it != cache_key_to_offset_.end()) { + return offset_it->second.offset; + } + + XNNPACK_ABORT_CHECK(!IsFinalized(), + "Cannot insert a buffer in a finalized cache."); + + const BufferLocation location = builder_.Append(pack_id, 
ptr, size); + cache_key_to_offset_.emplace(pack_id, location); + return location.offset; +} + +void* MMapWeightCacheProvider::OffsetToAddr(const size_t offset) { + // While the cache is being built, the buffer could grow and need to be + // reallocated so we cannot ensure pointer stability. + XNNPACK_ABORT_CHECK( + IsFinalized(), + "Cannot get the address of a buffer in a non finalized cache."); + return mmap_handle_.data() + mmap_buffer_base_offset_ + offset; +} + +void MMapWeightCacheProvider::Release() { + buffer_address_to_identifier_.clear(); + cache_key_to_offset_.clear(); + mmap_handle_ = MMapHandle(); + mmap_buffer_base_offset_ = 0; + builder_ = WeightCacheBuilder(); +} + +bool MMapWeightCacheProvider::Finalize() { + if (IsFinalized()) { + return true; + } + if (file_path_.empty()) { + TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, + "XNNPack weight cache: file path wasn't set. Cannot " + "finalize the cache."); + return false; + } + if (!builder_.Write(file_path_.c_str())) { + return false; + } + builder_ = WeightCacheBuilder(); + + return Load(); +} + +bool MMapWeightCacheProvider::IsFinalized() const { + return mmap_handle_.IsMapped(); +} + +size_t MMapWeightCacheProvider::look_up( + void* context, const xnn_weights_cache_look_up_key* cache_key) { + return reinterpret_cast(context)->LookUp(cache_key); +} + +void* MMapWeightCacheProvider::reserve_space(void* context, size_t n) { + return reinterpret_cast(context)->ReserveSpace(n); +} + +size_t MMapWeightCacheProvider::look_up_or_insert( + void* context, const xnn_weights_cache_look_up_key* cache_key, void* ptr, + size_t size) { + return reinterpret_cast(context)->LookUpOrInsert( + cache_key, ptr, size); +} + +bool MMapWeightCacheProvider::is_finalized(void* context) { + return reinterpret_cast(context)->IsFinalized(); +} + +void* MMapWeightCacheProvider::offset_to_addr(void* context, size_t offset) { + return reinterpret_cast(context)->OffsetToAddr( + offset); +} + +enum xnn_status MMapWeightCacheProvider::delete_cache(void* context) { + reinterpret_cast(context)->Release(); + return xnn_status_success; +} + +PackIdentifier MMapWeightCacheProvider::BuildPackIdentifier( + const xnn_weights_cache_look_up_key& key) { + const auto get_buffer_id = [&](const void* buffer) -> size_t { + if (buffer) { + const auto identifier_it = buffer_address_to_identifier_.find(buffer); + XNNPACK_ABORT_CHECK(identifier_it != buffer_address_to_identifier_.end(), + "Unknown constant buffer passed to HashCacheKey."); + return identifier_it->second; + } + return PackIdentifier::kNoId; + }; + return PackIdentifier{.pack_algorithm_id = key.seed, + .weights_id = get_buffer_id(key.kernel), + .bias_id = get_buffer_id(key.bias)}; +} + +} // namespace tflite::xnnpack diff --git a/tensorflow/lite/delegates/xnnpack/weight_cache.h b/tensorflow/lite/delegates/xnnpack/weight_cache.h new file mode 100644 index 00000000000000..0eb66308a37d85 --- /dev/null +++ b/tensorflow/lite/delegates/xnnpack/weight_cache.h @@ -0,0 +1,307 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_DELEGATES_XNNPACK_WEIGHT_CACHE_H_ +#define TENSORFLOW_LITE_DELEGATES_XNNPACK_WEIGHT_CACHE_H_ + +#include +#include +#include +#include +#include +#include + +#include "xnnpack.h" // from @XNNPACK +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h" + +// WARNING: the interface in this file is still under experimentation and WILL +// CHANGE. Do not rely on it. + +// TFLite doesn't use absl hashing utilities. + +namespace tflite { +namespace xnnpack { + +struct PackIdentifier { + enum { kNoId = SIZE_MAX }; + uint64_t pack_algorithm_id = kNoId; + uint64_t weights_id = kNoId; + uint64_t bias_id = kNoId; + + friend bool operator==(const PackIdentifier& a, const PackIdentifier& b) { + return a.pack_algorithm_id == b.pack_algorithm_id && + a.weights_id == b.weights_id && a.bias_id == b.bias_id; + } + + struct Hash { + size_t operator()(const PackIdentifier& p) const { + std::hash hasher; + return hasher(p.pack_algorithm_id) ^ hasher(p.weights_id) ^ + hasher(p.bias_id); + } + }; +}; + +struct BufferLocation { + uint64_t offset; + uint64_t size; +}; + +// Handles MMap allocations lifetime. +// +// When mapped, provides a view over the allocation for convenience. +// +// WARNING: the interface in this file is still under experimentation and WILL +// CHANGE. Do not rely on it. +class MMapHandle { + public: + using value_type = uint8_t; + + MMapHandle() = default; + ~MMapHandle(); + MMapHandle(const MMapHandle&) = delete; + MMapHandle& operator=(const MMapHandle&) = delete; + MMapHandle(MMapHandle&&); + MMapHandle& operator=(MMapHandle&&); + + // Maps the file at the given path. + [[nodiscard /*Mapping a file can fail.*/]] + bool Map(const char* path); + + // Unmaps an existing mapping. + void UnMap(); + + // Returns true if a mapping exists. + bool IsMapped() const { return data_ != nullptr; } + + // Returns the mapping buffer. + uint8_t* data() { return data_; } + + // Returns the mapping buffer. + const uint8_t* data() const { return data_; } + + // Returns the mapping size in bytes. + size_t size() const { return size_; } + + uint8_t* begin() { return data(); } + + const uint8_t* begin() const { return data(); } + + uint8_t* end() { return data() + size(); } + + const uint8_t* end() const { return data() + size(); } + + friend void swap(MMapHandle& a, MMapHandle& b); + + private: + size_t size_ = 0; + uint8_t* data_ = nullptr; +}; + +// Provides storage to write the packed buffers to and saves those to disk. +// +// WARNING: the interface in this file is still under experimentation and WILL +// CHANGE. Do not rely on it. +class WeightCacheBuilder { + public: + // Reserves space in the data buffer for the required size in bytes and + // returns the address of that space. + // + // Sets `last_reserve` to the offset from `buffer_data_`'s start and `n`. + // + // A call to `Reserve` should alway be followed by a call to `Append`. + [[nodiscard /*The pointer to reserved space should be used.*/]] + void* Reserve(size_t size); + + // Adds a buffer to the cache. + // + // The buffer space must have been reserved before using `Reserve`. If not, a + // new call to `Reserve` will be done and the data will be copied over. 
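+  //
+  // Illustrative use (sketch only, not prescriptive):
+  //   void* dst = builder.Reserve(packed_size);
+  //   /* pack the weights into dst */
+  //   const BufferLocation loc = builder.Append(pack_id, dst, packed_size);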
+ [[nodiscard /*The location to the appended data should be saved.*/]] + BufferLocation Append(PackIdentifier pack_id, const void* data, + uint64_t size); + + // Checks whether this builder has data that needs to be written to disk. + bool ShouldWrite() const; + + // Writes the flatbuffer to disk. + [[nodiscard /*Writing the weight cache can fail.*/]] + bool Write(const char* path); + + // Helper for testing. + // + // WARNING: this exposes class implementation details for testing purposes and + // may be removed at any time. + const std::vector& BufferData() const { return buffer_data_; } + + private: + bool SpanIsWithinBuffer(const void* ptr, uint64_t size) const; + + cache::schema::PackedWeightsT schema_; + std::vector buffer_data_; +}; + +// Allows XNNPack to directly load packed weights from disk instead of having to +// repack them every time. +// +// XNNPack kernels do not have knowledge of the TFLite context. The only thing +// they can access is the buffers address. We rely on the fact that the address +// provided by TFLite is unique in order to find out the buffer identifier. +// +// To use the cache you need to: +// +// - Map the buffer addresses to their identifier with `MapTensorIdentifiers` +// - Load the cache file. +// - Finalize the cache before calling the run functions of XNNPack (setup and +// reshape are ok). +class MMapWeightCacheProvider { + public: + MMapWeightCacheProvider() = default; + MMapWeightCacheProvider(const MMapWeightCacheProvider&) = delete; + MMapWeightCacheProvider& operator=(const MMapWeightCacheProvider&) = delete; + MMapWeightCacheProvider(MMapWeightCacheProvider&&); + MMapWeightCacheProvider& operator=(MMapWeightCacheProvider&&); + + // Changes the file path to save the cache to. + // + // WARNING: Can only be called if the cache isn't finalized. + void SetFilePath(const char* file_path); + + const std::string& GetFilePath() const { return file_path_; } + + // Set the weight file path and loads it. + [[nodiscard /*Loading a cache file may fail.*/]] + bool Load(const std::string& path); + + // Loads the weight cache previouslt set with `SetFilePath`. + [[nodiscard /*Loading cache data may fail.*/]] + bool Load(); + + // Creates the tensor map. + void MapTensorIdentifiers( + const TfLiteTensor* tensors, size_t size, + const std::unordered_map& tensor_index_to_identifier); + + // Returns the offset of the buffer identified by `cache_key`. + // + // If the buffer isn't found, return SIZE_MAX. + [[nodiscard]] + size_t LookUp(const xnn_weights_cache_look_up_key* cache_key); + + // Reserves space for a buffer of given size and returns a pointer to it. + // + // The buffer data should be filled and `LookUpOrInsert` should be immediately + // called. + [[nodiscard]] + void* ReserveSpace(size_t size); + + // Returns the offset of the buffer identified by `cache_key`. If the lookup + // fails, inserts the span `[ptr, ptr+size)`. + // + // This should be called after ReserveSpace and `ptr` should be the result of + // that call with the given `size`. + // + // WARNING: The cache key cannot be null. + [[nodiscard]] + size_t LookUpOrInsert(const xnn_weights_cache_look_up_key* cache_key, + void* ptr, size_t size); + + // Gets the pointer to the buffer at the given offset. + // + // WARNING: This requires the buffer to be finalized. + // WARNING: This does not check the validity of the passed offset. + void* OffsetToAddr(size_t offset); + + // Releases the weight cache's memory. + void Release(); + + // Ensures that the cache is ready. 
+ // + // If the cache file already exists, this is a no-op. Otherwise, this writes + // the file to disk and reloads it. + [[nodiscard /*Writing the cache file may fail.*/]] + bool Finalize(); + + // Checks whether the cache is ready to be used. + bool IsFinalized() const; + + // Returns true if any weights have been added to the underlying builder. + bool IsBuilding() const { return !IsFinalized() && !file_path_.empty(); }; + + // Returns true if a file is mapped or a file path is set. + bool IsActive() const { return IsFinalized() || !file_path_.empty(); }; + + // Returns the cache provider expected by XNNPack. + xnn_weights_cache_provider& GetCacheProvider() { return cache_provider_; } + + // C interface: `xnn_weights_cache_provider` callback. + static size_t look_up(void* context, + const xnn_weights_cache_look_up_key* cache_key); + + // C interface: `xnn_weights_cache_provider` callback. + static void* reserve_space(void* context, size_t n); + + // C interface: `xnn_weights_cache_provider` callback. + static size_t look_up_or_insert( + void* context, const xnn_weights_cache_look_up_key* cache_key, void* ptr, + size_t size); + + // C interface: `xnn_weights_cache_provider` callback. + static bool is_finalized(void* context); + + // C interface: `xnn_weights_cache_provider` callback. + static void* offset_to_addr(void* context, size_t offset); + + // C interface: `xnn_weights_cache_provider` callback. + static enum xnn_status delete_cache(void* context); + + private: + // Hashes a cache key to lookup in `cache_key_to_identifier_`. + PackIdentifier BuildPackIdentifier(const xnn_weights_cache_look_up_key& key); + + // Cache provider implementation for XNNPack. + xnn_weights_cache_provider cache_provider_{ + .context = this, + .look_up = MMapWeightCacheProvider::look_up, + .reserve_space = MMapWeightCacheProvider::reserve_space, + .look_up_or_insert = MMapWeightCacheProvider::look_up_or_insert, + .is_finalized = MMapWeightCacheProvider::is_finalized, + .offset_to_addr = MMapWeightCacheProvider::offset_to_addr, + .delete_cache = MMapWeightCacheProvider::delete_cache}; + + // Path to the cache file. + std::string file_path_; + + // Maps buffer addresses to buffer identifiers. + std::unordered_map buffer_address_to_identifier_; + + // Maps cache request hashes to the buffer identifier. + std::unordered_multimap + cache_key_to_offset_; + + // MMap allocation handler. + MMapHandle mmap_handle_; + + // The offset to the first buffer data in the MMap allocation. + size_t mmap_buffer_base_offset_; + + // Used to build the cache. + WeightCacheBuilder builder_; +}; + +} // namespace xnnpack +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_XNNPACK_WEIGHT_CACHE_H_ diff --git a/tensorflow/lite/delegates/xnnpack/weight_cache_schema.fbs b/tensorflow/lite/delegates/xnnpack/weight_cache_schema.fbs new file mode 100644 index 00000000000000..0658054f21c07e --- /dev/null +++ b/tensorflow/lite/delegates/xnnpack/weight_cache_schema.fbs @@ -0,0 +1,52 @@ +// Copyright 2024 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// This is a list of buffers with identifiers, to host the CPU-specific cache on disk. +namespace tflite.xnnpack.cache.schema; + +// Schema version. +file_identifier "V001"; +// File extension of written files. +file_extension "xnn_weights"; + +table Buffer { + // To uniquely identify a packed buffer we need to keep track of the packing + // algorithm and of the buffers that were used to generate it. + packing_algorithm_id: uint64; + weights_id: uint64; + bias_id: uint64; + + /// The buffer data is appended after the flatbuffer to bypass 2GB file size + /// limitation. The offset is calculated relative to the base offset. + /// (i.e. beginning of the file + base_offset). + offset: uint64; + + /// Size of the buffer in bytes. + size: uint64; +} + +table PackedWeights { + /// A list of buffers. + buffers: [Buffer]; + + /// The serialized file is `flatbuffer_size` of bytes representing + /// `NamedBuffers` appended with a blob representing the buffer content. + flatbuffer_size: uint64; + + /// Defines the base offset for the data appended to the file. That offset + /// may be needed to guarantee data alignment. + base_offset:uint64; +} + +root_type PackedWeights; diff --git a/tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h b/tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h new file mode 100755 index 00000000000000..fa5d30a4cdae65 --- /dev/null +++ b/tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h @@ -0,0 +1,422 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_WEIGHTCACHESCHEMA_TFLITE_XNNPACK_CACHE_SCHEMA_H_ +#define FLATBUFFERS_GENERATED_WEIGHTCACHESCHEMA_TFLITE_XNNPACK_CACHE_SCHEMA_H_ + +#include "flatbuffers/flatbuffers.h" + +// Ensure the included flatbuffers.h is the same version as when this file was +// generated, otherwise it may not be compatible. 
+static_assert(FLATBUFFERS_VERSION_MAJOR == 24 && + FLATBUFFERS_VERSION_MINOR == 3 && + FLATBUFFERS_VERSION_REVISION == 25, + "Non-compatible flatbuffers version included"); + +namespace tflite { +namespace xnnpack { +namespace cache { +namespace schema { + +struct Buffer; +struct BufferBuilder; +struct BufferT; + +struct PackedWeights; +struct PackedWeightsBuilder; +struct PackedWeightsT; + +struct BufferT : public ::flatbuffers::NativeTable { + typedef Buffer TableType; + uint64_t packing_algorithm_id = 0; + uint64_t weights_id = 0; + uint64_t bias_id = 0; + uint64_t offset = 0; + uint64_t size = 0; +}; + +struct Buffer FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BufferT NativeTableType; + typedef BufferBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PACKING_ALGORITHM_ID = 4, + VT_WEIGHTS_ID = 6, + VT_BIAS_ID = 8, + VT_OFFSET = 10, + VT_SIZE = 12 + }; + uint64_t packing_algorithm_id() const { + return GetField(VT_PACKING_ALGORITHM_ID, 0); + } + bool mutate_packing_algorithm_id(uint64_t _packing_algorithm_id = 0) { + return SetField(VT_PACKING_ALGORITHM_ID, _packing_algorithm_id, 0); + } + uint64_t weights_id() const { + return GetField(VT_WEIGHTS_ID, 0); + } + bool mutate_weights_id(uint64_t _weights_id = 0) { + return SetField(VT_WEIGHTS_ID, _weights_id, 0); + } + uint64_t bias_id() const { + return GetField(VT_BIAS_ID, 0); + } + bool mutate_bias_id(uint64_t _bias_id = 0) { + return SetField(VT_BIAS_ID, _bias_id, 0); + } + /// The buffer data is appended after the flatbuffer to bypass 2GB file size + /// limitation. The offset is calculated relative to the base offset. + /// (i.e. beginning of the file + base_offset). + uint64_t offset() const { + return GetField(VT_OFFSET, 0); + } + bool mutate_offset(uint64_t _offset = 0) { + return SetField(VT_OFFSET, _offset, 0); + } + /// Size of the buffer in bytes. 
+ uint64_t size() const { + return GetField(VT_SIZE, 0); + } + bool mutate_size(uint64_t _size = 0) { + return SetField(VT_SIZE, _size, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PACKING_ALGORITHM_ID, 8) && + VerifyField(verifier, VT_WEIGHTS_ID, 8) && + VerifyField(verifier, VT_BIAS_ID, 8) && + VerifyField(verifier, VT_OFFSET, 8) && + VerifyField(verifier, VT_SIZE, 8) && + verifier.EndTable(); + } + BufferT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BufferT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BufferBuilder { + typedef Buffer Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_packing_algorithm_id(uint64_t packing_algorithm_id) { + fbb_.AddElement(Buffer::VT_PACKING_ALGORITHM_ID, packing_algorithm_id, 0); + } + void add_weights_id(uint64_t weights_id) { + fbb_.AddElement(Buffer::VT_WEIGHTS_ID, weights_id, 0); + } + void add_bias_id(uint64_t bias_id) { + fbb_.AddElement(Buffer::VT_BIAS_ID, bias_id, 0); + } + void add_offset(uint64_t offset) { + fbb_.AddElement(Buffer::VT_OFFSET, offset, 0); + } + void add_size(uint64_t size) { + fbb_.AddElement(Buffer::VT_SIZE, size, 0); + } + explicit BufferBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateBuffer( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint64_t packing_algorithm_id = 0, + uint64_t weights_id = 0, + uint64_t bias_id = 0, + uint64_t offset = 0, + uint64_t size = 0) { + BufferBuilder builder_(_fbb); + builder_.add_size(size); + builder_.add_offset(offset); + builder_.add_bias_id(bias_id); + builder_.add_weights_id(weights_id); + builder_.add_packing_algorithm_id(packing_algorithm_id); + return builder_.Finish(); +} + +::flatbuffers::Offset CreateBuffer(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PackedWeightsT : public ::flatbuffers::NativeTable { + typedef PackedWeights TableType; + std::vector> buffers{}; + uint64_t flatbuffer_size = 0; + uint64_t base_offset = 0; + PackedWeightsT() = default; + PackedWeightsT(const PackedWeightsT &o); + PackedWeightsT(PackedWeightsT&&) FLATBUFFERS_NOEXCEPT = default; + PackedWeightsT &operator=(PackedWeightsT o) FLATBUFFERS_NOEXCEPT; +}; + +struct PackedWeights FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef PackedWeightsT NativeTableType; + typedef PackedWeightsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BUFFERS = 4, + VT_FLATBUFFER_SIZE = 6, + VT_BASE_OFFSET = 8 + }; + /// A list of buffers. + const ::flatbuffers::Vector<::flatbuffers::Offset> *buffers() const { + return GetPointer> *>(VT_BUFFERS); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_buffers() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_BUFFERS); + } + /// The serialized file is `flatbuffer_size` of bytes representing + /// `NamedBuffers` appended with a blob representing the buffer content. 
+ uint64_t flatbuffer_size() const { + return GetField(VT_FLATBUFFER_SIZE, 0); + } + bool mutate_flatbuffer_size(uint64_t _flatbuffer_size = 0) { + return SetField(VT_FLATBUFFER_SIZE, _flatbuffer_size, 0); + } + /// Defines the base offset for the data appended to the file. That offset + /// may be needed to guarantee data alignment. + uint64_t base_offset() const { + return GetField(VT_BASE_OFFSET, 0); + } + bool mutate_base_offset(uint64_t _base_offset = 0) { + return SetField(VT_BASE_OFFSET, _base_offset, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BUFFERS) && + verifier.VerifyVector(buffers()) && + verifier.VerifyVectorOfTables(buffers()) && + VerifyField(verifier, VT_FLATBUFFER_SIZE, 8) && + VerifyField(verifier, VT_BASE_OFFSET, 8) && + verifier.EndTable(); + } + PackedWeightsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PackedWeightsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PackedWeightsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PackedWeightsBuilder { + typedef PackedWeights Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_buffers(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> buffers) { + fbb_.AddOffset(PackedWeights::VT_BUFFERS, buffers); + } + void add_flatbuffer_size(uint64_t flatbuffer_size) { + fbb_.AddElement(PackedWeights::VT_FLATBUFFER_SIZE, flatbuffer_size, 0); + } + void add_base_offset(uint64_t base_offset) { + fbb_.AddElement(PackedWeights::VT_BASE_OFFSET, base_offset, 0); + } + explicit PackedWeightsBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreatePackedWeights( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> buffers = 0, + uint64_t flatbuffer_size = 0, + uint64_t base_offset = 0) { + PackedWeightsBuilder builder_(_fbb); + builder_.add_base_offset(base_offset); + builder_.add_flatbuffer_size(flatbuffer_size); + builder_.add_buffers(buffers); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreatePackedWeightsDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<::flatbuffers::Offset> *buffers = nullptr, + uint64_t flatbuffer_size = 0, + uint64_t base_offset = 0) { + auto buffers__ = buffers ? 
_fbb.CreateVector<::flatbuffers::Offset>(*buffers) : 0; + return tflite::xnnpack::cache::schema::CreatePackedWeights( + _fbb, + buffers__, + flatbuffer_size, + base_offset); +} + +::flatbuffers::Offset CreatePackedWeights(::flatbuffers::FlatBufferBuilder &_fbb, const PackedWeightsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline BufferT *Buffer::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BufferT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Buffer::UnPackTo(BufferT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = packing_algorithm_id(); _o->packing_algorithm_id = _e; } + { auto _e = weights_id(); _o->weights_id = _e; } + { auto _e = bias_id(); _o->bias_id = _e; } + { auto _e = offset(); _o->offset = _e; } + { auto _e = size(); _o->size = _e; } +} + +inline ::flatbuffers::Offset Buffer::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateBuffer(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateBuffer(::flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _packing_algorithm_id = _o->packing_algorithm_id; + auto _weights_id = _o->weights_id; + auto _bias_id = _o->bias_id; + auto _offset = _o->offset; + auto _size = _o->size; + return tflite::xnnpack::cache::schema::CreateBuffer( + _fbb, + _packing_algorithm_id, + _weights_id, + _bias_id, + _offset, + _size); +} + +inline PackedWeightsT::PackedWeightsT(const PackedWeightsT &o) + : flatbuffer_size(o.flatbuffer_size), + base_offset(o.base_offset) { + buffers.reserve(o.buffers.size()); + for (const auto &buffers_ : o.buffers) { buffers.emplace_back((buffers_) ? 
new tflite::xnnpack::cache::schema::BufferT(*buffers_) : nullptr); } +} + +inline PackedWeightsT &PackedWeightsT::operator=(PackedWeightsT o) FLATBUFFERS_NOEXCEPT { + std::swap(buffers, o.buffers); + std::swap(flatbuffer_size, o.flatbuffer_size); + std::swap(base_offset, o.base_offset); + return *this; +} + +inline PackedWeightsT *PackedWeights::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new PackedWeightsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void PackedWeights::UnPackTo(PackedWeightsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->buffers[_i]) { _e->Get(_i)->UnPackTo(_o->buffers[_i].get(), _resolver); } else { _o->buffers[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->buffers.resize(0); } } + { auto _e = flatbuffer_size(); _o->flatbuffer_size = _e; } + { auto _e = base_offset(); _o->base_offset = _e; } +} + +inline ::flatbuffers::Offset PackedWeights::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const PackedWeightsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreatePackedWeights(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreatePackedWeights(::flatbuffers::FlatBufferBuilder &_fbb, const PackedWeightsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const PackedWeightsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _buffers = _o->buffers.size() ? 
_fbb.CreateVector<::flatbuffers::Offset> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _flatbuffer_size = _o->flatbuffer_size; + auto _base_offset = _o->base_offset; + return tflite::xnnpack::cache::schema::CreatePackedWeights( + _fbb, + _buffers, + _flatbuffer_size, + _base_offset); +} + +inline const tflite::xnnpack::cache::schema::PackedWeights *GetPackedWeights(const void *buf) { + return ::flatbuffers::GetRoot(buf); +} + +inline const tflite::xnnpack::cache::schema::PackedWeights *GetSizePrefixedPackedWeights(const void *buf) { + return ::flatbuffers::GetSizePrefixedRoot(buf); +} + +inline PackedWeights *GetMutablePackedWeights(void *buf) { + return ::flatbuffers::GetMutableRoot(buf); +} + +inline tflite::xnnpack::cache::schema::PackedWeights *GetMutableSizePrefixedPackedWeights(void *buf) { + return ::flatbuffers::GetMutableSizePrefixedRoot(buf); +} + +inline const char *PackedWeightsIdentifier() { + return "V001"; +} + +inline bool PackedWeightsBufferHasIdentifier(const void *buf) { + return ::flatbuffers::BufferHasIdentifier( + buf, PackedWeightsIdentifier()); +} + +inline bool SizePrefixedPackedWeightsBufferHasIdentifier(const void *buf) { + return ::flatbuffers::BufferHasIdentifier( + buf, PackedWeightsIdentifier(), true); +} + +inline bool VerifyPackedWeightsBuffer( + ::flatbuffers::Verifier &verifier) { + return verifier.VerifyBuffer(PackedWeightsIdentifier()); +} + +inline bool VerifySizePrefixedPackedWeightsBuffer( + ::flatbuffers::Verifier &verifier) { + return verifier.VerifySizePrefixedBuffer(PackedWeightsIdentifier()); +} + +inline const char *PackedWeightsExtension() { + return "xnn_weights"; +} + +inline void FinishPackedWeightsBuffer( + ::flatbuffers::FlatBufferBuilder &fbb, + ::flatbuffers::Offset root) { + fbb.Finish(root, PackedWeightsIdentifier()); +} + +inline void FinishSizePrefixedPackedWeightsBuffer( + ::flatbuffers::FlatBufferBuilder &fbb, + ::flatbuffers::Offset root) { + fbb.FinishSizePrefixed(root, PackedWeightsIdentifier()); +} + +inline std::unique_ptr UnPackPackedWeights( + const void *buf, + const ::flatbuffers::resolver_function_t *res = nullptr) { + return std::unique_ptr(GetPackedWeights(buf)->UnPack(res)); +} + +inline std::unique_ptr UnPackSizePrefixedPackedWeights( + const void *buf, + const ::flatbuffers::resolver_function_t *res = nullptr) { + return std::unique_ptr(GetSizePrefixedPackedWeights(buf)->UnPack(res)); +} + +} // namespace schema +} // namespace cache +} // namespace xnnpack +} // namespace tflite + +#endif // FLATBUFFERS_GENERATED_WEIGHTCACHESCHEMA_TFLITE_XNNPACK_CACHE_SCHEMA_H_ diff --git a/tensorflow/lite/delegates/xnnpack/weight_cache_test.cc b/tensorflow/lite/delegates/xnnpack/weight_cache_test.cc new file mode 100644 index 00000000000000..6ecbba3b17b8b2 --- /dev/null +++ b/tensorflow/lite/delegates/xnnpack/weight_cache_test.cc @@ -0,0 +1,727 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "tensorflow/lite/delegates/xnnpack/weight_cache.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "xnnpack.h" // from @XNNPACK +#include "flatbuffers/verifier.h" // from @flatbuffers +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h" + +namespace tflite::xnnpack { + +std::ostream& operator<<(std::ostream& os, const PackIdentifier& p) { + return os << "PackIdentifier{pack_algo: " << p.pack_algorithm_id + << ", weights_id: " << p.weights_id << ", bias_id: " << p.bias_id + << "}"; +} + +namespace { + +using testing::ElementsAreArray; +using testing::Ge; + +#ifndef XNN_TEST_WEIGHT_CACHE_TEMP_FILE_TEMPATE +#define XNN_TEST_WEIGHT_CACHE_TEMP_FILE_TEMPATE \ + "/tmp/weight_cache_test_file.XXXXXX" +#endif +constexpr const char kTempFileTemplate[] = + XNN_TEST_WEIGHT_CACHE_TEMP_FILE_TEMPATE; + +// Wraps a call to `mkstemp` to create temporary files. +class TempFileDesc { + public: + static constexpr struct AutoClose { + } kAutoCLose{}; + +#if defined(_MSC_VER) + TempFileDesc() : fd_() { + char filename[L_tmpnam_s]; + errno_t err = tmpnam_s(filename, L_tmpnam_s); + if (err) { + fprintf(stderr, "Could not create temporary filename.\n"); + std::abort(); + } + path_ = filename; + fd_ = open(path_.c_str(), O_CREAT | O_EXCL | O_RDWR, 0644); + if (fd_ < 0) { + fprintf(stderr, "Could not create temporary filename.\n"); + std::abort(); + } + } +#else + TempFileDesc() : fd_(mkstemp(path_.data())) { + if (GetFd() < 0) { + perror("Could not create temporary file"); + } + } +#endif + + explicit TempFileDesc(AutoClose) : TempFileDesc() { Close(); } + + TempFileDesc(const TempFileDesc&) = delete; + TempFileDesc& operator=(const TempFileDesc&) = delete; + + friend void swap(TempFileDesc& a, TempFileDesc& b) { + std::swap(a.path_, b.path_); + std::swap(a.fd_, b.fd_); + } + + TempFileDesc(TempFileDesc&& other) { swap(*this, other); } + TempFileDesc& operator=(TempFileDesc&& other) { + swap(*this, other); + return *this; + } + + ~TempFileDesc() { Close(); } + + void Close() { + if (GetFd() >= 0) { + close(fd_); + fd_ = -1; + } + } + + const std::string& GetPath() const { return path_; } + + const char* GetCPath() const { return path_.c_str(); } + + int GetFd() const { return fd_; } + + bool IsOpen() const { return fd_ >= 0; } + + private: + std::string path_ = kTempFileTemplate; + int fd_ = -1; +}; + +TEST(MMapHandleTest, DefaultConstructs) { + MMapHandle handle; + EXPECT_FALSE(handle.IsMapped()); + EXPECT_EQ(handle.data(), nullptr); + EXPECT_EQ(handle.size(), 0); +} + +TEST(MMapHandleTest, MapNonExitxingFileFails) { + // I hope this path doesn't exist... 
+ const char* file_path = "sdbgfd"; + MMapHandle handle; + EXPECT_FALSE(handle.Map(file_path)); +} + +TEST(MMapHandleTest, MapExistingFileWorks) { + using std::size; + + const std::string payload = "This is some data in the file."; + + TempFileDesc tmp_file; + ASSERT_TRUE(tmp_file.IsOpen()); + ASSERT_EQ(write(tmp_file.GetFd(), payload.c_str(), size(payload)), + size(payload)); + tmp_file.Close(); + + MMapHandle handle; + ASSERT_TRUE(handle.Map(tmp_file.GetCPath())); + EXPECT_TRUE(handle.IsMapped()); + EXPECT_NE(handle.data(), nullptr); + EXPECT_THAT(handle.size(), Ge(size(payload))); + EXPECT_THAT(handle, ElementsAreArray(payload)); + + handle.UnMap(); + EXPECT_FALSE(handle.IsMapped()); + EXPECT_EQ(handle.data(), nullptr); + EXPECT_EQ(handle.size(), 0); +} + +TEST(MMapHandleTest, MoveConstructs) { + const std::string payload = "This is some data in the file."; + + TempFileDesc tmp_file; + ASSERT_TRUE(tmp_file.IsOpen()); + ASSERT_EQ(write(tmp_file.GetFd(), payload.c_str(), size(payload)), + size(payload)); + tmp_file.Close(); + + MMapHandle handle; + ASSERT_TRUE(handle.Map(tmp_file.GetCPath())); + + MMapHandle handle2(std::move(handle)); + + // We are checking that the moved from handle has lost control over the data. + // NOLINTBEGIN(bugprone-use-after-move) + EXPECT_FALSE(handle.IsMapped()); + EXPECT_EQ(handle.data(), nullptr); + EXPECT_EQ(handle.size(), 0); + // NOLINTEND(bugprone-use-after-move) + + EXPECT_TRUE(handle2.IsMapped()); + EXPECT_NE(handle2.data(), nullptr); + EXPECT_THAT(handle2.size(), Ge(size(payload))); + EXPECT_THAT(handle2, ElementsAreArray(payload)); +} + +TEST(WeightCacheBuilderTest, ReserveAppendWriteWorks) { + using std::size; + + const std::string payload = "This is some data in the file."; + const PackIdentifier dummy_id{1, 2, 3}; + + WeightCacheBuilder builder; + + const size_t payload_size = size(payload); + void* buffer = builder.Reserve(payload_size); + std::memcpy(buffer, payload.c_str(), payload_size); + auto loc = builder.Append(dummy_id, buffer, payload_size); + + EXPECT_EQ(loc.size, payload_size); + EXPECT_EQ(builder.BufferData().size(), payload_size); + EXPECT_TRUE(builder.ShouldWrite()); + + TempFileDesc tmp_file; + ASSERT_TRUE(tmp_file.IsOpen()); + tmp_file.Close(); + + ASSERT_TRUE(builder.Write(tmp_file.GetCPath())); + + MMapHandle handle; + ASSERT_TRUE(handle.Map(tmp_file.GetCPath())); + + const cache::schema::PackedWeights* const packed_weights = + cache::schema::GetPackedWeights(handle.data()); + ASSERT_NE(packed_weights, nullptr); + EXPECT_LE(packed_weights->flatbuffer_size(), size(handle) - size(payload)); + ASSERT_NE(packed_weights->buffers(), nullptr); + ASSERT_EQ(packed_weights->buffers()->size(), 1); + ASSERT_NE(packed_weights->buffers()->Get(0), nullptr); + ASSERT_EQ(packed_weights->buffers()->Get(0)->size(), size(payload)); + EXPECT_EQ(packed_weights->buffers()->Get(0)->offset(), 0); + ASSERT_EQ(packed_weights->buffers()->Get(0)->packing_algorithm_id(), + dummy_id.pack_algorithm_id); + ASSERT_EQ(packed_weights->buffers()->Get(0)->weights_id(), + dummy_id.weights_id); + ASSERT_EQ(packed_weights->buffers()->Get(0)->bias_id(), dummy_id.bias_id); + + flatbuffers::Verifier verifier(handle.data(), handle.size()); + EXPECT_TRUE(cache::schema::VerifyPackedWeightsBuffer(verifier)) + << packed_weights->flatbuffer_size() << " " << handle.size() << " " + << packed_weights->buffers()->size() << "\n" + << tmp_file.GetPath(); +} + +TEST(WeightCacheBuilderTest, AppendWithoutReserveWriteWorks) { + using std::size; + + const std::string payload = "This is 
some data in the file."; + const PackIdentifier dummy_id{1, 2, 3}; + + WeightCacheBuilder builder; + + const size_t payload_size = size(payload); + auto loc = builder.Append(dummy_id, payload.c_str(), payload_size); + + EXPECT_EQ(loc.size, payload_size); + EXPECT_EQ(builder.BufferData().size(), payload_size); + EXPECT_TRUE(builder.ShouldWrite()); + + TempFileDesc tmp_file; + ASSERT_TRUE(tmp_file.IsOpen()); + tmp_file.Close(); + + ASSERT_TRUE(builder.Write(tmp_file.GetCPath())); + + MMapHandle handle; + ASSERT_TRUE(handle.Map(tmp_file.GetCPath())); + + const cache::schema::PackedWeights* const packed_weights = + cache::schema::GetPackedWeights(handle.data()); + ASSERT_NE(packed_weights, nullptr); + EXPECT_LE(packed_weights->flatbuffer_size(), size(handle) - size(payload)); + ASSERT_NE(packed_weights->buffers(), nullptr); + ASSERT_EQ(packed_weights->buffers()->size(), 1); + ASSERT_NE(packed_weights->buffers()->Get(0), nullptr); + ASSERT_EQ(packed_weights->buffers()->Get(0)->size(), size(payload)); + EXPECT_EQ(packed_weights->buffers()->Get(0)->offset(), 0); + ASSERT_EQ(packed_weights->buffers()->Get(0)->packing_algorithm_id(), + dummy_id.pack_algorithm_id); + ASSERT_EQ(packed_weights->buffers()->Get(0)->weights_id(), + dummy_id.weights_id); + ASSERT_EQ(packed_weights->buffers()->Get(0)->bias_id(), dummy_id.bias_id); + + flatbuffers::Verifier verifier(handle.data(), handle.size()); + EXPECT_TRUE(cache::schema::VerifyPackedWeightsBuffer(verifier)) + << packed_weights->flatbuffer_size() << " " << handle.size() << " " + << packed_weights->buffers()->size() << "\n" + << tmp_file.GetPath(); +} + +TEST(WeightCacheBuilderTest, NonExistingPathFails) { + using std::size; + + const std::string payload = "This is some data in the file."; + const PackIdentifier dummy_id{1, 2, 3}; + + WeightCacheBuilder builder; + + const size_t payload_size = size(payload); + auto loc = builder.Append(dummy_id, payload.c_str(), payload_size); + + EXPECT_EQ(loc.size, payload_size); + EXPECT_EQ(builder.BufferData().size(), payload_size); + EXPECT_TRUE(builder.ShouldWrite()); + + EXPECT_FALSE(builder.Write("")); + EXPECT_FALSE(builder.Write("/selktjdsljf")); +} + +struct FakeContext { + // Adds a new tensor and it's backing buffer to the context. + // + // The tensor `data` will not be set until `FinalizeTensors` is called. + void AddTensor(int buffer_identifier, size_t size) { + buffers.emplace_back(size, buffer_identifier); + tensors.push_back({}); + tensors.back().allocation_type = kTfLiteMmapRo; + tensor_buffer_identifiers[tensors.size() - 1] = buffer_identifier; + } + + // Updates the tensor data mappings. + // + // This needs to be called every time the context `tensors` list is + // reallocated (mainly because of insertions). + void FinalizeTensors() { + for (size_t i = 0; i < tensors.size(); ++i) { + tensors[i].data.data = buffers[i].data(); + tensors[i].bytes = buffers[i].size(); + } + } + + // Creates a look up key for the XNNPack weight provider C interface. + xnn_weights_cache_look_up_key LookUpKey(const uint32_t algorithm_seed, + const int weights_index) const { + return {.seed = algorithm_seed, + .kernel = buffers[weights_index].data(), + .bias = nullptr}; + } + + // Creates a look up key for the XNNPack weight provider C interface. 
+ xnn_weights_cache_look_up_key LookUpKey(const uint32_t algorithm_seed, + const int weights_index, + const int bias_index) const { + return {.seed = algorithm_seed, + .kernel = buffers[weights_index].data(), + .bias = buffers[bias_index].data()}; + } + + // Helps creating fake packed data. + void AddTensorToPack(std::vector& pack_buffer, int index) { + const std::vector& buffer = buffers[index]; + pack_buffer.resize(std::max(size(pack_buffer), size(buffer))); + for (size_t i = 0; i < size(buffer); ++i) { + pack_buffer[i] ^= buffer[i]; + } + } + + // Packs the referenced tensors into one buffer. + // + // Returns the pack id to retrieve the packed reference data from + // `packed_buffers`. + template + PackIdentifier PackTensors(xnn_weights_cache_t weight_cache, + const uint32_t algorithm_seed, + const Ids... tensor_indices) { + // Create fake packed and save the result for later lookup tests. + + PackIdentifier pack_id{algorithm_seed, + tensor_buffer_identifiers[tensor_indices]...}; + PackedBuffer& packed = + packed_buffers.emplace(pack_id, PackedBuffer{})->second; + (AddTensorToPack(packed.buffer, tensor_indices), ...); + + // Add the packed buffer to the XNNPack cache. Normaly you would pack in + // place where the reserved space is. + xnn_weights_cache_look_up_key look_up_key = + LookUpKey(algorithm_seed, tensor_indices...); + packed.offset = weight_cache->look_up_or_insert( + weight_cache->context, &look_up_key, packed.buffer.data(), + packed.buffer.size()); + return pack_id; + } + + struct PackedBuffer { + size_t offset; + std::vector buffer; + }; + + std::vector tensors; + std::vector> buffers; + std::unordered_multimap + packed_buffers; + std::unordered_map tensor_buffer_identifiers; +}; + +struct BuildMMapWeightCacheProviderTest : testing::Test { + enum { kAlgoSeed1, kAlgoSeed2, kAlgoSeed3 }; + enum { kBufferId1, kBufferId2, kBufferId3, kBufferId4 }; + + void SetUp() override { + AddTensors(); + EndSetup(); + } + + void AddTensors() { + ctx.AddTensor(/*buffer_identifier=*/kBufferId1, /*size=*/12); + ctx.AddTensor(/*buffer_identifier=*/kBufferId2, /*size=*/43); + ctx.AddTensor(/*buffer_identifier=*/kBufferId3, /*size=*/64); + ctx.AddTensor(/*buffer_identifier=*/kBufferId4, /*size=*/8); + } + + void EndSetup() { + ctx.FinalizeTensors(); + cache_provider.MapTensorIdentifiers(ctx.tensors.data(), ctx.tensors.size(), + ctx.tensor_buffer_identifiers); + } + + FakeContext ctx; + MMapWeightCacheProvider cache_provider; +}; + +TEST_F(BuildMMapWeightCacheProviderTest, LookUpFailsIfKeyDoesntMatch) { + xnn_weights_cache_look_up_key look_up_key{}; + EXPECT_EQ(cache_provider.LookUp(&look_up_key), SIZE_MAX); +} + +TEST_F(BuildMMapWeightCacheProviderTest, LookUpSucceeds) { + enum { kWeightIndex, kBiasIndex }; + const auto pack_id = ctx.PackTensors(&cache_provider.GetCacheProvider(), + kAlgoSeed1, kWeightIndex, kBiasIndex); + const xnn_weights_cache_look_up_key look_up_key = + ctx.LookUpKey(kAlgoSeed1, kWeightIndex, kBiasIndex); + + EXPECT_EQ(cache_provider.LookUp(&look_up_key), + ctx.packed_buffers.find(pack_id)->second.offset); +} + +TEST_F(BuildMMapWeightCacheProviderTest, + DifferentAlgoSeedsSameTensorsDontConflict) { + enum { kWeightIndex, kBiasIndex }; + const auto pack_id_1 = ctx.PackTensors(&cache_provider.GetCacheProvider(), + kAlgoSeed1, kWeightIndex, kBiasIndex); + const auto pack_id_2 = ctx.PackTensors(&cache_provider.GetCacheProvider(), + kAlgoSeed2, kWeightIndex, kBiasIndex); + + const xnn_weights_cache_look_up_key look_up_key_1 = + ctx.LookUpKey(kAlgoSeed1, kWeightIndex, kBiasIndex); 
+ const xnn_weights_cache_look_up_key look_up_key_2 = + ctx.LookUpKey(kAlgoSeed2, kWeightIndex, kBiasIndex); + + EXPECT_EQ(cache_provider.LookUp(&look_up_key_1), + ctx.packed_buffers.find(pack_id_1)->second.offset); + EXPECT_EQ(cache_provider.LookUp(&look_up_key_2), + ctx.packed_buffers.find(pack_id_2)->second.offset); + EXPECT_NE(cache_provider.LookUp(&look_up_key_1), + cache_provider.LookUp(&look_up_key_2)); +} + +TEST_F(BuildMMapWeightCacheProviderTest, + SameAlgoSeedDifferentTensorsDontConflict) { + enum { kWeightIndex1, kWeightIndex2, kBiasIndex1, kBiasIndex2 }; + const auto pack_id_1 = + ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1, + kWeightIndex1, kBiasIndex1); + const auto pack_id_2 = + ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1, + kWeightIndex2, kBiasIndex1); + const auto pack_id_3 = + ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1, + kWeightIndex1, kBiasIndex2); + const auto pack_id_4 = + ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1, + kWeightIndex2, kBiasIndex2); + + const xnn_weights_cache_look_up_key look_up_key_1 = + ctx.LookUpKey(kAlgoSeed1, kWeightIndex1, kBiasIndex1); + const xnn_weights_cache_look_up_key look_up_key_2 = + ctx.LookUpKey(kAlgoSeed1, kWeightIndex2, kBiasIndex1); + const xnn_weights_cache_look_up_key look_up_key_3 = + ctx.LookUpKey(kAlgoSeed1, kWeightIndex1, kBiasIndex2); + const xnn_weights_cache_look_up_key look_up_key_4 = + ctx.LookUpKey(kAlgoSeed1, kWeightIndex2, kBiasIndex2); + + EXPECT_EQ(cache_provider.LookUp(&look_up_key_1), + ctx.packed_buffers.find(pack_id_1)->second.offset); + EXPECT_EQ(cache_provider.LookUp(&look_up_key_2), + ctx.packed_buffers.find(pack_id_2)->second.offset); + EXPECT_EQ(cache_provider.LookUp(&look_up_key_3), + ctx.packed_buffers.find(pack_id_3)->second.offset); + EXPECT_EQ(cache_provider.LookUp(&look_up_key_4), + ctx.packed_buffers.find(pack_id_4)->second.offset); + EXPECT_NE(cache_provider.LookUp(&look_up_key_1), + cache_provider.LookUp(&look_up_key_2)); + EXPECT_NE(cache_provider.LookUp(&look_up_key_1), + cache_provider.LookUp(&look_up_key_3)); + EXPECT_NE(cache_provider.LookUp(&look_up_key_1), + cache_provider.LookUp(&look_up_key_4)) + << pack_id_1 << " " << pack_id_4; + EXPECT_NE(cache_provider.LookUp(&look_up_key_2), + cache_provider.LookUp(&look_up_key_3)); + EXPECT_NE(cache_provider.LookUp(&look_up_key_2), + cache_provider.LookUp(&look_up_key_4)); + EXPECT_NE(cache_provider.LookUp(&look_up_key_3), + cache_provider.LookUp(&look_up_key_4)); +} + +TEST_F(BuildMMapWeightCacheProviderTest, FinalizeWorks) { + enum { kWeightIndex1, kBiasIndex, kWeightIndex2 }; + TempFileDesc tmp_file; + + ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1, kWeightIndex1, + kBiasIndex); + ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed2, + kWeightIndex2); + + EXPECT_FALSE(cache_provider.Finalize()); + cache_provider.SetFilePath(tmp_file.GetCPath()); + + EXPECT_TRUE(cache_provider.IsActive()); + EXPECT_TRUE(cache_provider.IsBuilding()); + ASSERT_TRUE(cache_provider.Finalize()); + + ASSERT_TRUE(cache_provider.IsFinalized()); +} + +struct LoadMMapWeightCacheProviderTest : BuildMMapWeightCacheProviderTest { + enum { kWeightIndex1, kBiasIndex, kWeightIndex2 }; + + void SetUp() override { + BuildMMapWeightCacheProviderTest::SetUp(); + cache_provider.SetFilePath(tmp_file.GetCPath()); + + pack_id_1 = ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1, + kWeightIndex1, kBiasIndex); + pack_id_2 = ctx.PackTensors(&cache_provider.GetCacheProvider(), 
kAlgoSeed2, + kWeightIndex2); + + ASSERT_TRUE(cache_provider.Finalize()); + ASSERT_TRUE(cache_provider.IsFinalized()); + } + + xnn_weights_cache_look_up_key LookUpKey1() const { + return ctx.LookUpKey(kAlgoSeed1, kWeightIndex1, kBiasIndex); + } + + xnn_weights_cache_look_up_key LookUpKey2() const { + return ctx.LookUpKey(kAlgoSeed2, kWeightIndex2); + } + + TempFileDesc tmp_file; + PackIdentifier pack_id_1; + PackIdentifier pack_id_2; +}; + +TEST_F(LoadMMapWeightCacheProviderTest, LookUpFailsIfKeyDoesntMatch) { + xnn_weights_cache_look_up_key look_up_key{}; + EXPECT_EQ(cache_provider.LookUp(&look_up_key), SIZE_MAX); +} + +template +class LightSpan { + public: + using value_type = T; + + LightSpan(const void* data, const size_t size) + : ptr_(reinterpret_cast(data)), size_(size) {} + + const T* begin() const { return ptr_; } + const T* end() const { return ptr_ + size_; } + + private: + T* ptr_; + size_t size_; +}; + +TEST_F(LoadMMapWeightCacheProviderTest, LookUpSucceeds) { + const auto& reference_1 = ctx.packed_buffers.find(pack_id_1)->second; + const auto& reference_2 = ctx.packed_buffers.find(pack_id_2)->second; + + const xnn_weights_cache_look_up_key look_up_key_1 = LookUpKey1(); + const xnn_weights_cache_look_up_key look_up_key_2 = LookUpKey2(); + + const uint64_t offset_1 = cache_provider.LookUp(&look_up_key_1); + const uint64_t offset_2 = cache_provider.LookUp(&look_up_key_2); + + ASSERT_EQ(offset_1, reference_1.offset); + ASSERT_EQ(offset_2, reference_2.offset); + + const void* const addr_1 = cache_provider.OffsetToAddr(offset_1); + const void* const addr_2 = cache_provider.OffsetToAddr(offset_2); + + ASSERT_NE(addr_1, nullptr); + ASSERT_NE(addr_2, nullptr); + + EXPECT_THAT(LightSpan(addr_1, reference_1.buffer.size()), + ElementsAreArray(reference_1.buffer)); + EXPECT_THAT(LightSpan(addr_2, reference_2.buffer.size()), + ElementsAreArray(reference_2.buffer)); +} + +TEST(MMapWeightCacheProviderTest, XnnpackCApiJourney) { + using std::size; + TempFileDesc temp_fd(TempFileDesc::kAutoCLose); + const int32_t fake_packing_algo_seed = 0xBA0BAB; + const char packed_data_ref_1[] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + const char packed_data_ref_2[] = {26, 32, 43, 59, 34, 65, 80, 101}; + auto bytes = [](const auto& array) { return size(array) * sizeof(array[0]); }; + + constexpr int kBufferCount = 10; + // We are going to feed dummy packed data. We only need a valid pointer + // address to map to a buffer identifier. + char fake_buffer_pointer[kBufferCount] = {0}; + + { // Build and reload scenario. + TfLiteTensor tensors[kBufferCount]; + std::unordered_map tensor_buffer_identifiers; + for (int i = 0; i < kBufferCount; ++i) { + tensors[i].data.data = (void*)(fake_buffer_pointer + i); + tensor_buffer_identifiers[i] = i; + } + + MMapWeightCacheProvider cache_provider; + cache_provider.SetFilePath(temp_fd.GetCPath()); + + xnn_weights_cache_t cache = &cache_provider.GetCacheProvider(); + cache_provider.MapTensorIdentifiers(tensors, size(tensors), + tensor_buffer_identifiers); + + const xnn_weights_cache_look_up_key look_up_key_1{ + .seed = fake_packing_algo_seed, + .kernel = tensors[0].data.data, + .bias = tensors[1].data.data}; + + // Lookup non-packed tensor. + ASSERT_EQ(cache->look_up(cache, &look_up_key_1), SIZE_MAX); + // Reserve space, write data and add packed data. 
+ void* const reserved_ptr = + cache->reserve_space(cache, bytes(packed_data_ref_1)); + ASSERT_NE(reserved_ptr, nullptr); + std::memcpy(reserved_ptr, packed_data_ref_1, bytes(packed_data_ref_1)); + const size_t build_offset_1 = cache->look_up_or_insert( + cache, &look_up_key_1, reserved_ptr, bytes(packed_data_ref_1)); + + // Check that a second insertion with the same key returns the same offset. + const size_t build_offset_redundant = cache->look_up_or_insert( + cache, &look_up_key_1, reserved_ptr, bytes(packed_data_ref_1)); + EXPECT_EQ(build_offset_1, build_offset_redundant); + + // Lookup newly packed tensor. + ASSERT_EQ(cache->look_up(cache, &look_up_key_1), build_offset_1); + + // Add a tensor without reserving before. + const xnn_weights_cache_look_up_key look_up_key_2{ + .seed = fake_packing_algo_seed, + .kernel = tensors[2].data.data, + .bias = tensors[3].data.data}; + const size_t build_offset_2 = cache->look_up_or_insert( + cache, &look_up_key_2, (void*)packed_data_ref_2, + bytes(packed_data_ref_2)); + + // Save the cache to disk and reload. + ASSERT_TRUE(cache_provider.Finalize()); + + ASSERT_TRUE(cache->is_finalized(cache)); + + const size_t reload_offset_1 = cache->look_up(cache, &look_up_key_1); + ASSERT_EQ(reload_offset_1, build_offset_1); + + const void* const loaded_packed_data_1 = + cache->offset_to_addr(cache, reload_offset_1); + ASSERT_NE(loaded_packed_data_1, nullptr); + EXPECT_THAT( + LightSpan(loaded_packed_data_1, size(packed_data_ref_1)), + ElementsAreArray(packed_data_ref_1)); + + const size_t reload_offset_2 = cache->look_up(cache, &look_up_key_2); + ASSERT_EQ(reload_offset_2, build_offset_2); + + const void* const loaded_packed_data_2 = + cache->offset_to_addr(cache, reload_offset_2); + ASSERT_NE(loaded_packed_data_2, nullptr); + EXPECT_THAT( + LightSpan(loaded_packed_data_2, size(packed_data_ref_2)), + ElementsAreArray(packed_data_ref_2)); + } + + { // Load existing cache scenario. 
+ TfLiteTensor tensors[kBufferCount]; + std::unordered_map tensor_buffer_identifiers; + for (int i = 0; i < kBufferCount; ++i) { + tensors[i].data.data = (void*)(fake_buffer_pointer + i); + tensor_buffer_identifiers[i] = i; + } + + MMapWeightCacheProvider cache_provider; + ASSERT_TRUE(cache_provider.Load(temp_fd.GetCPath())); + + xnn_weights_cache_t cache = &cache_provider.GetCacheProvider(); + cache_provider.MapTensorIdentifiers(tensors, size(tensors), + tensor_buffer_identifiers); + + const xnn_weights_cache_look_up_key look_up_key_1{ + .seed = fake_packing_algo_seed, + .kernel = tensors[0].data.data, + .bias = tensors[1].data.data}; + + const xnn_weights_cache_look_up_key look_up_key_2{ + .seed = fake_packing_algo_seed, + .kernel = tensors[2].data.data, + .bias = tensors[3].data.data}; + + ASSERT_TRUE(cache->is_finalized(cache)); + + const size_t offset_1 = cache->look_up(cache, &look_up_key_1); + const void* const loaded_packed_data_1 = + cache->offset_to_addr(cache, offset_1); + ASSERT_NE(loaded_packed_data_1, nullptr); + EXPECT_THAT( + LightSpan(loaded_packed_data_1, size(packed_data_ref_1)), + ElementsAreArray(packed_data_ref_1)); + + const size_t offset_2 = cache->look_up(cache, &look_up_key_2); + const void* const loaded_packed_data_2 = + cache->offset_to_addr(cache, offset_2); + ASSERT_NE(loaded_packed_data_2, nullptr); + EXPECT_THAT( + LightSpan(loaded_packed_data_2, size(packed_data_ref_2)), + ElementsAreArray(packed_data_ref_2)); + } +} + +} // namespace +} // namespace tflite::xnnpack diff --git a/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc b/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc index 76cc6dba209ab9..26d24bca18032d 100644 --- a/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc +++ b/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc @@ -38,13 +38,16 @@ limitations under the License. #include "tensorflow/lite/core/api/profiler.h" #include "tensorflow/lite/core/c/builtin_op_data.h" #include "tensorflow/lite/core/c/common.h" +#include "tensorflow/lite/core/subgraph.h" #include "tensorflow/lite/delegates/xnnpack/quantization_util.h" +#include "tensorflow/lite/delegates/xnnpack/weight_cache.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/utils/sparsity_format_converter.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/logger.h" #include "tensorflow/lite/minimal_logging.h" #include "tensorflow/lite/schema/schema_generated.h" #include "tensorflow/lite/tools/optimize/reduced_precision_support.h" @@ -487,6 +490,8 @@ class VariableHolder { std::map global_id_to_dims_and_type_; }; +class Subgraph; + class Delegate { friend class Subgraph; @@ -518,13 +523,38 @@ class Delegate { } #endif - TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, - "Created TensorFlow Lite XNNPACK delegate for CPU."); + TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO, + "Created TensorFlow Lite XNNPACK delegate for CPU."); options_ = options != nullptr ? *options : TfLiteXNNPackDelegateOptionsDefault(); delegate_.flags = GetXNNPackDelegateFlags(); workspace_.reset(workspace); + + // If no weight cache is provided, add one when requested. 
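The hunk that follows implements this wiring inside the delegate constructor. From the application side, "requesting" a cache reduces to setting the new option declared in `xnnpack_delegate.h`; a hedged usage sketch (the file path below is made up for illustration):

```c++
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"

// Hypothetical usage: opt into the file-backed weight cache purely through
// the new experimental option. If the file exists it is loaded; otherwise the
// delegate builds the cache and finalizes it during Prepare.
TfLiteDelegate* MakeDelegateWithWeightCache() {
  TfLiteXNNPackDelegateOptions options = TfLiteXNNPackDelegateOptionsDefault();
  options.experimental_weight_cache_file_path =
      "/data/local/tmp/model.xnn_weights";  // illustrative path
  return TfLiteXNNPackDelegateCreate(&options);
  // Release later with TfLiteXNNPackDelegateDelete().
}
```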
+ if (!options_.weights_cache) { + if (options_.experimental_weight_cache_file_path) { + if (weight_cache_provider_.Load( + options_.experimental_weight_cache_file_path)) { + TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO, + "XNNPack weight cache loaded from '%s'.", + options_.experimental_weight_cache_file_path); + } else { + TFLITE_LOG_PROD( + tflite::TFLITE_LOG_INFO, + "XNNPack weight cache not found at '%s', building it.", + options_.experimental_weight_cache_file_path); + } + options_.weights_cache = + reinterpret_cast( + weight_cache_provider_.GetCacheProvider().context); + options_.experimental_weight_cache_file_path = + weight_cache_provider_.GetFilePath().data(); + } else { + TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO, + "XNNPack weight cache not enabled."); + } + } } TfLiteIntArray* PrepareOpsToDelegate(TfLiteContext* context); @@ -664,7 +694,7 @@ class Delegate { #endif } - TfLiteXNNPackDelegateOptions options() const { return options_; } + const TfLiteXNNPackDelegateOptions& options() const { return options_; } int64_t GetXNNPackDelegateFlags() { if (enable_subgraph_reshaping()) { @@ -711,6 +741,10 @@ class Delegate { TfLiteXNNPackDelegateOptions options_{}; VariableHolder variable_holder_; std::mutex workspace_mutex_; + + // If no weight cache is provided and a cache is set in the delegate options, + // this will be used as a weight cache. + MMapWeightCacheProvider weight_cache_provider_; }; class Subgraph { @@ -781,6 +815,13 @@ class Subgraph { static Subgraph* Create(TfLiteContext* context, const TfLiteDelegateParams* params, Delegate& delegate) { + // Map tensors identifiers before packing anything. + if (delegate.weight_cache_provider_.IsActive()) { + delegate.weight_cache_provider_.MapTensorIdentifiers( + context->tensors, context->tensors_size, + reinterpret_cast(context->impl_) + ->GetTensorBufferIdentifiers()); + } // Convert subgraph inputs and outputs to hash sets for faster lookup. const std::unordered_set inputs( ¶ms->input_tensors->data[0], @@ -1121,6 +1162,18 @@ class Subgraph { TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node, bool enable_subgraph_reshaping, Delegate* delegate) { std::lock_guard lock(delegate->workspace_mutex_); + + // The weights cache needs to be finalized only once. Prepare will be called + // for each partition after all the partitions have been created (therefore + // all the weights are known and have been packed). 
+ if (delegate->weight_cache_provider_.IsActive()) { + if (!delegate->weight_cache_provider_.Finalize()) { + TF_LITE_KERNEL_LOG(context, + "XNNPack delegate failed to finalize cache."); + return kTfLiteError; + } + } + if (enable_subgraph_reshaping) { xnn_status status = xnn_status_invalid_state; for (int i = 0; i < inputs_.size(); ++i) { @@ -1170,10 +1223,8 @@ class Subgraph { return kTfLiteError; } } - return kTfLiteOk; - } else { - return kTfLiteOk; } + return kTfLiteOk; } TfLiteStatus Invoke(TfLiteContext* context, bool enable_subgraph_reshaping, @@ -7798,7 +7849,6 @@ void TfLiteXNNPackDelegateWeightsCacheDelete( } auto weights_cache = reinterpret_cast(cache); xnn_delete_weights_cache(weights_cache); - xnn_deinitialize(); } TfLiteXNNPackDelegateOptions TfLiteXNNPackDelegateOptionsDefault() { @@ -7830,14 +7880,6 @@ TfLiteXNNPackDelegateOptions TfLiteXNNPackDelegateOptionsDefault() { return options; } -TfLiteXNNPackDelegateOptions GetOptions(const void* delegate_data) { - if (delegate_data == nullptr) { - return TfLiteXNNPackDelegateOptionsDefault(); - } - return static_cast(delegate_data) - ->options(); -} - TfLiteDelegate* TfLiteXNNPackDelegateCreate( const TfLiteXNNPackDelegateOptions* options) { return TfLiteXNNPackDelegateCreateWithThreadpool(options, nullptr); @@ -7869,6 +7911,15 @@ void* TfLiteXNNPackDelegateGetThreadPool(TfLiteDelegate* delegate) { static_cast<::tflite::xnnpack::Delegate*>(delegate->data_)->threadpool()); } +const TfLiteXNNPackDelegateOptions* TfLiteXNNPackDelegateGetOptions( + TfLiteDelegate* delegate) { + if (delegate == nullptr) { + return nullptr; + } + return &(static_cast(delegate->data_) + ->options()); +} + int TfLiteXNNPackDelegateGetFlags(TfLiteDelegate* delegate) { if (delegate == nullptr) { return 0; diff --git a/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h b/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h index aa11998dc0fc49..55eddcf1a54d67 100644 --- a/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h +++ b/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h @@ -70,6 +70,10 @@ typedef struct { bool handle_variable_ops; // Enable adaptive optimization for AVX CPUs. bool experimental_adaptive_avx_optimization; + // Path to the weight cache to load if `weight_cache` is undefined. + // + // WARNING this is an experimental flag. + const char* experimental_weight_cache_file_path; } TfLiteXNNPackDelegateOptions; // Returns a structure with the default XNNPack delegate options. @@ -97,6 +101,13 @@ TfLiteDelegate* TfLiteXNNPackDelegateCreateWithThreadpool( TFL_CAPI_EXPORT void* TfLiteXNNPackDelegateGetThreadPool( TfLiteDelegate* delegate); +// Returns the options in the delegate. +// Returns NULL if the delegate is NULL. +// +// WARNING: This API is experimental and subject to change. +TFL_CAPI_EXPORT const TfLiteXNNPackDelegateOptions* +TfLiteXNNPackDelegateGetOptions(TfLiteDelegate* delegate); + // Returns the flags used for an XNNPack delegate. // See documentation for TfLiteXNNPackDelegateOptions.flags. // @@ -111,11 +122,13 @@ TFL_CAPI_EXPORT void TfLiteXNNPackDelegateDelete(TfLiteDelegate* delegate); // reduce memory bandwidth. TFL_CAPI_EXPORT struct TfLiteXNNPackDelegateWeightsCache* TfLiteXNNPackDelegateWeightsCacheCreate(); + // Creates a new weights cache with a specified initial size that can be shared // with multiple delegate instances. The weights cache can hold up to size bytes // without growing. 
TFL_CAPI_EXPORT struct TfLiteXNNPackDelegateWeightsCache* TfLiteXNNPackDelegateWeightsCacheCreateWithSize(size_t size); + // Soft-finalize a weights cache. Extra space will be left in the weights cache // to allow for cache "insertion" only if it is a cache hit. This has memory // overhead compared to TfLiteXNNPackDelegateWeightsCacheFinalizeHard. Use this @@ -124,6 +137,7 @@ TfLiteXNNPackDelegateWeightsCacheCreateWithSize(size_t size); // Returns true on success, false on error. TFL_CAPI_EXPORT bool TfLiteXNNPackDelegateWeightsCacheFinalizeSoft( struct TfLiteXNNPackDelegateWeightsCache* cache); + // Hard-finalize a weights cache, cache is effectively frozen and no more cache // operations are allowed. Memory is resized to smallest possible. Use this if // the number of interpreter instances using XNNPACK delegate can be fixed and @@ -132,6 +146,7 @@ TFL_CAPI_EXPORT bool TfLiteXNNPackDelegateWeightsCacheFinalizeSoft( // Returns true on success, false on error. TFL_CAPI_EXPORT bool TfLiteXNNPackDelegateWeightsCacheFinalizeHard( struct TfLiteXNNPackDelegateWeightsCache* cache); + // Destroys a weights cache created with // `TfLiteXNNPackDelegateWeightsCacheCreate` call. TFL_CAPI_EXPORT void TfLiteXNNPackDelegateWeightsCacheDelete( diff --git a/tensorflow/lite/delegates/xnnpack/xnnpack_delegate_test.h b/tensorflow/lite/delegates/xnnpack/xnnpack_delegate_test.h deleted file mode 100644 index b20d024f06debe..00000000000000 --- a/tensorflow/lite/delegates/xnnpack/xnnpack_delegate_test.h +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_DELEGATES_XNNPACK_XNNPACK_DELEGATE_TEST_H_ -#define TENSORFLOW_LITE_DELEGATES_XNNPACK_XNNPACK_DELEGATE_TEST_H_ - -#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h" - -// This test only function has undefined behaviour unless delegate was obtained -// by calling TfLiteOpaqueDelegateGetData on a TfLiteOpaqueDelegate* that refers -// to an XNNPACK delegate, or from the 'data' field of a TfLiteDelegate* that -// refers to an XNNPACK delegate. -TfLiteXNNPackDelegateOptions GetOptions(const void* delegate_data); -#endif // TENSORFLOW_LITE_DELEGATES_XNNPACK_XNNPACK_DELEGATE_TEST_H_ diff --git a/tensorflow/lite/experimental/acceleration/mini_benchmark/BUILD b/tensorflow/lite/experimental/acceleration/mini_benchmark/BUILD index 945eab9ffe521f..a9990f90a1b7f3 100644 --- a/tensorflow/lite/experimental/acceleration/mini_benchmark/BUILD +++ b/tensorflow/lite/experimental/acceleration/mini_benchmark/BUILD @@ -1103,6 +1103,13 @@ cc_test( cc_test( name = "validator_runner_test", srcs = ["validator_runner_test.cc"], + # Shared libraries that are built and loaded into internal Google applications are generally + # unsupported. One consequence of this lack of support is that AddressSanitizer's ODR checking + # will produce spurious failures with an upcoming Crosstool change. 
The workaround for this is + # to disable the ODR checking on affected tests. + env = { + "ASAN_OPTIONS": "detect_odr_violation=0", + }, tags = [ "no_mac", "no_windows", diff --git a/tensorflow/lite/experimental/acceleration/mini_benchmark/build_defs.bzl b/tensorflow/lite/experimental/acceleration/mini_benchmark/build_defs.bzl index 1c2a1a3f8cb561..6bc40c3c19571b 100644 --- a/tensorflow/lite/experimental/acceleration/mini_benchmark/build_defs.bzl +++ b/tensorflow/lite/experimental/acceleration/mini_benchmark/build_defs.bzl @@ -104,7 +104,7 @@ def validation_model( srcs = [ main_model, jpegs, - "//tensorflow/lite/schema:schema.fbs", + "//tensorflow/compiler/mlir/lite/schema:schema.fbs", metrics_model, ], outs = [name + ".tflite"], @@ -112,7 +112,7 @@ def validation_model( JPEGS='$(locations %s)' JPEGS=$${JPEGS// /,} $(location //tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier:embedder_cmdline) \ - --schema=$(location //tensorflow/lite/schema:schema.fbs) \ + --schema=$(location //tensorflow/compiler/mlir/lite/schema:schema.fbs) \ --main_model=$(location %s) \ --metrics_model=$(location %s) \ %s %s \ diff --git a/tensorflow/lite/g3doc/models/convert/index.md b/tensorflow/lite/g3doc/models/convert/index.md index 094758708a0faa..5aee45c5c1ae1f 100644 --- a/tensorflow/lite/g3doc/models/convert/index.md +++ b/tensorflow/lite/g3doc/models/convert/index.md @@ -97,7 +97,7 @@ for your model: 1. [Optimization flags](../../performance/model_optimization) allow you to specify the type of optimization to apply during conversion. The most commonly used optimization technique is - [post-training quanitization](). + [post-training quantization](). 1. [Metadata flags](metadata) allow you to add metadata to the converted model which makes it easier to create platform specific wrapper code when deploying models on devices. @@ -142,7 +142,7 @@ format model and a custom runtime environment for that model. converting your model. * See the [optimization overview](../../performance/model_optimization) for guidance on how to optimize your converted model using techniques like - [post-training quanitization](../../performance/post_training_quantization). + [post-training quantization](../../performance/post_training_quantization). * See the [Adding metadata overview](metadata) to learn how to add metadata to your models. Metadata provides other uses a description of your model as well as information that can be leveraged by code generators. diff --git a/tensorflow/lite/kernels/internal/optimized/cpu_check.cc b/tensorflow/lite/kernels/internal/optimized/cpu_check.cc index 8fd17a7e33a03e..565299a8004acf 100644 --- a/tensorflow/lite/kernels/internal/optimized/cpu_check.cc +++ b/tensorflow/lite/kernels/internal/optimized/cpu_check.cc @@ -42,9 +42,9 @@ bool DetectDotprodByLinuxAuxvMethod() { bool DetectArmNeonDotprod() { #if defined __linux__ && defined __aarch64__ return DetectDotprodByLinuxAuxvMethod(); -#endif - +#else return false; +#endif } } // namespace tflite diff --git a/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_3x3_filter.h b/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_3x3_filter.h index 0cb1a23e5567b9..8d761dd3dc3e10 100644 --- a/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_3x3_filter.h +++ b/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_3x3_filter.h @@ -15,6 +15,8 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_3X3_FILTER_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_3X3_FILTER_H_ +#include + #include #include "ruy/profiler/instrumentation.h" // from @ruy @@ -32,7 +34,6 @@ namespace depthwise_conv { // Enable for arm64 except for the Nvidia Linux 4 Tegra (L4T) running on // Jetson TX-2. This compiler does not support the offsetof() macro. #if defined(__aarch64__) && !defined(GOOGLE_L4T) -#include // Represents the number of bytes offset from the start of the // DepthwiseConvParams struct. This is used in the asm to load parameters. diff --git a/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid_3x3_filter.h b/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid_3x3_filter.h index f9472515417e85..5c7abda84fcfa0 100644 --- a/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid_3x3_filter.h +++ b/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid_3x3_filter.h @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_3X3_FILTER_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_3X3_FILTER_H_ +#include + #include #include "ruy/profiler/instrumentation.h" // from @ruy @@ -32,7 +34,6 @@ namespace depthwise_conv { // Enable for arm64 except for the Nvidia Linux 4 Tegra (L4T) running on // Jetson TX-2. This compiler does not support the offsetof() macro. #if defined(__aarch64__) && !defined(GOOGLE_L4T) -#include // Represents the number of bytes offset from the start of the // DepthwiseConvParams struct. This is used in the asm to load parameters. 
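The two depthwise-conv header hunks above hoist an include out of the `#if defined(__aarch64__) && !defined(GOOGLE_L4T)` block so it is visible unconditionally at the top of the header. A rough sketch of the pattern that guarded block relies on; the struct and field names here are invented stand-ins for `DepthwiseConvParams`, and `<cstddef>` is only an assumption about where `offsetof` comes from in this sketch:

```c++
#include <cstddef>  // offsetof, for illustration
#include <cstdint>

// The aarch64-only code computes byte offsets of struct members so the inline
// asm can load kernel parameters at fixed offsets from the struct base.
struct ExampleAsmParams {
  const int8_t* input_ptr;
  int64_t input_row_size;
};
constexpr std::size_t kInputRowSizeOffset =
    offsetof(ExampleAsmParams, input_row_size);
```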
diff --git a/tensorflow/lite/kernels/internal/reference/transpose_conv.h b/tensorflow/lite/kernels/internal/reference/transpose_conv.h index 8a51e0fa5e9742..744ed0f826b335 100644 --- a/tensorflow/lite/kernels/internal/reference/transpose_conv.h +++ b/tensorflow/lite/kernels/internal/reference/transpose_conv.h @@ -219,6 +219,103 @@ inline void TransposeConv( } } +inline void HybridTransposeConv( + const ConvParams& params, float* scaling_factors_ptr, + const RuntimeShape& input_shape, const int8_t* input_data, + const RuntimeShape& filter_shape, const int8_t* filter_data, + const RuntimeShape& bias_shape, const float* bias_data, + const RuntimeShape& output_shape, float* output_data, + const float* per_channel_scale, int32_t* input_offset) { + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = params.padding_values.width; + const int pad_height = params.padding_values.height; + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const float output_activation_min = params.float_activation_min; + const float output_activation_max = params.float_activation_max; + if (bias_data) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + // Although transpose convolution simplifies to convolution with transposed + // weights for strides of 1, non-unitary striding complicates matters. To + // keep this reference implementation as clear as possible, we use a + // "scatter" access pattern, where we loop through all the input elements, + // computing their influence on the output, rather than looping through the + // output elements in the typical "gather" access pattern of a conv. We + // therefore must initialize the output array to zero. + const int num_elements = output_shape.FlatSize(); + for (int i = 0; i < num_elements; i++) { + output_data[i] = 0.0f; + } + + // Loop through input elements one at a time. 
+ for (int batch = 0; batch < batches; ++batch) { + const float scaling_factor = scaling_factors_ptr[batch]; + for (int in_y = 0; in_y < input_height; ++in_y) { + for (int in_x = 0; in_x < input_width; ++in_x) { + for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + // Loop through the output elements it will influence + const int out_x_origin = (in_x * stride_width) - pad_width; + const int out_y_origin = (in_y * stride_height) - pad_height; + for (int filter_y = 0; filter_y < filter_height; ++filter_y) { + for (int filter_x = 0; filter_x < filter_width; ++filter_x) { + for (int out_channel = 0; out_channel < output_depth; + ++out_channel) { + // Compute output element location + const int out_x = out_x_origin + filter_x; + const int out_y = out_y_origin + filter_y; + // We cannot accumulate out of bounds + if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && + (out_y < output_height)) { + int32_t input_value = input_data[Offset( + input_shape, batch, in_y, in_x, in_channel)]; + int32_t filter_value = + filter_data[Offset(filter_shape, out_channel, filter_y, + filter_x, in_channel)]; + int32_t acc = + (input_value - input_offset[batch]) * filter_value; + output_data[Offset(output_shape, batch, out_y, out_x, + out_channel)] += + acc * per_channel_scale[out_channel] * scaling_factor; + } + } + } + } + } + } + } + } + + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + float acc = output_data[Offset(output_shape, batch, out_y, out_x, + out_channel)]; + if (bias_data) acc += bias_data[out_channel]; + + output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = + ActivationFunctionWithMinMax(acc, output_activation_min, + output_activation_max); + } + } + } + } +} + } // namespace reference_ops } // namespace tflite diff --git a/tensorflow/lite/kernels/kernel_util.cc b/tensorflow/lite/kernels/kernel_util.cc index 58fd99f8fb15be..39f7bc7da53a49 100644 --- a/tensorflow/lite/kernels/kernel_util.cc +++ b/tensorflow/lite/kernels/kernel_util.cc @@ -572,12 +572,11 @@ int TfLiteTypeGetSize(TfLiteType type) { bool IsMobilePlatform() { #if defined(ANDROID) || defined(__ANDROID__) return true; -#elif defined(__APPLE__) -#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE +#elif defined(__APPLE__) && (TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE) return true; -#endif -#endif +#else return false; +#endif } bool HasUnspecifiedDimension(const TfLiteTensor* tensor) { diff --git a/tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.h b/tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.h index 971ddad7f95feb..f25e10f235d0e7 100644 --- a/tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.h +++ b/tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.h @@ -450,7 +450,7 @@ struct FeatureProtos { // Map from feature name to FeatureProtos for that feature. using FeatureProtosMap = absl::flat_hash_map; -string ExampleName(const gtl::ArraySlice example_names, int n); +string ExampleName(const absl::Span example_names, int n); // Return the number of bytes elements parsed, or -1 on error. If out is null, // this method simply counts the number of elements without any copying. 
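A minimal scalar illustration (editorial, not part of the patch) of the hybrid accumulation performed by `HybridTransposeConv` above: int8 inputs and weights are multiplied in integer arithmetic, then rescaled to float with the per-batch input scale/offset and the per-output-channel filter scale. The reference kernel rescales each partial product as it scatters into the output; factoring the rescale out of the sum, as below, is mathematically equivalent.

```c++
#include <cstdint>
#include <vector>

// Hybrid (dynamic-range quantized) dot product reconstructed to float.
// Assumes input_q and filter_q have the same length.
float HybridDot(const std::vector<int8_t>& input_q,
                const std::vector<int8_t>& filter_q,
                float input_scaling_factor, int32_t input_offset,
                float filter_channel_scale) {
  int32_t acc = 0;
  for (size_t i = 0; i < input_q.size(); ++i) {
    acc += (static_cast<int32_t>(input_q[i]) - input_offset) *
           static_cast<int32_t>(filter_q[i]);
  }
  return acc * filter_channel_scale * input_scaling_factor;
}
```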
diff --git a/tensorflow/lite/kernels/parse_example/parse_example.cc b/tensorflow/lite/kernels/parse_example/parse_example.cc index 86ac20625c7cde..0d3e8657c2a7fe 100644 --- a/tensorflow/lite/kernels/parse_example/parse_example.cc +++ b/tensorflow/lite/kernels/parse_example/parse_example.cc @@ -430,7 +430,7 @@ void CopySparseBufferToTensor(tf::DataType dtype, size_t offset, } } -inline void CopyToBuffer(tf::gtl::ArraySlice vec, char* tensor_buffer, +inline void CopyToBuffer(absl::Span vec, char* tensor_buffer, int num_examples, int batch_size, int elements_per_stride) { int i = 0, k = 0; @@ -454,7 +454,7 @@ inline void CopyToBuffer(tf::gtl::ArraySlice vec, char* tensor_buffer, Status FastParseExampleLite( const FastParseExampleConfig& config, const TfLiteTensor* serialized, - tf::gtl::ArraySlice example_names, bool* quick_filter, + absl::Span example_names, bool* quick_filter, int quick_filter_size, const std::unique_ptr& config_index, int config_index_size, SeededHasher* hasher, TfLiteResult* result, std::map& stats, TfLiteContext* context) { @@ -633,7 +633,7 @@ Status FastParseExampleLite( memcpy(tensor_buffer + sizeof(int32_t) * (i + 1), &offset_i, sizeof(int32_t)); } - tf::gtl::ArraySlice slice(vec.data(), vec.size()); + absl::Span slice(vec.data(), vec.size()); CopyToBuffer(slice, tensor_buffer + start, count, batch_size, elements_per_stride); } diff --git a/tensorflow/lite/kernels/transpose_conv.cc b/tensorflow/lite/kernels/transpose_conv.cc index 93c6df28890c9c..7d7c9a410ef451 100644 --- a/tensorflow/lite/kernels/transpose_conv.cc +++ b/tensorflow/lite/kernels/transpose_conv.cc @@ -26,8 +26,10 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/optimized/integer_ops/transpose_conv.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" // NOLINTNEXTLINE - This header file shouldn't go to the top. +#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" +#include "tensorflow/lite/kernels/internal/reference/transpose_conv.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" @@ -59,6 +61,9 @@ struct OpData { int col2im_id = kTensorNotAllocated; int transposed_weights_id = kTensorNotAllocated; int scratch_tensor_id = kTensorNotAllocated; + int input_quantized_id = kTensorNotAllocated; + int scaling_factors_id = kTensorNotAllocated; + int input_offset_id = kTensorNotAllocated; // col2im is the temporary tensor allocated and used in optimized path for // storing col2im data:gemm result for input_matrix x filter_matrix. @@ -73,6 +78,11 @@ struct OpData { // results. int32_t scratch_tensor_index; + // Indexes are used for hybrid (dynamic range quantization) path. + int32_t input_quantized_index; + int32_t scaling_factors_index; + int32_t input_offset_index; + TfLitePaddingValues padding; // The scaling factor from input to output (aka the 'real multiplier') can // be represented as a fixed point multiplier plus a left shift. @@ -160,6 +170,32 @@ static TfLiteStatus AllocateTemporaryTensorsIfRequired(TfLiteContext* context, ++temporaries_count; } + if (input_type == kTfLiteFloat32 && weights_type == kTfLiteInt8) { + // Allocate tensor to store the on-the-fly quantized inputs. 
+ data->input_quantized_index = temporaries_count; + if (data->input_quantized_id == kTensorNotAllocated) { + TF_LITE_ENSURE_OK( + context, context->AddTensors(context, 1, &data->input_quantized_id)); + } + ++temporaries_count; + + // Allocate tensor to store the quantization params computed during + // on-the-fly input quantization. + data->scaling_factors_index = temporaries_count; + if (data->scaling_factors_id == kTensorNotAllocated) { + TF_LITE_ENSURE_OK( + context, context->AddTensors(context, 1, &data->scaling_factors_id)); + } + ++temporaries_count; + + data->input_offset_index = temporaries_count; + if (data->input_offset_id == kTensorNotAllocated) { + TF_LITE_ENSURE_OK( + context, context->AddTensors(context, 1, &data->input_offset_id)); + } + ++temporaries_count; + } + TfLiteIntArrayFree(node->temporaries); node->temporaries = TfLiteIntArrayCreate(temporaries_count); @@ -308,8 +344,6 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { bias->type == params->quantized_bias_type); data->quantized_bias_type = params->quantized_bias_type; } - } else { - TF_LITE_ENSURE_TYPES_EQ(context, weights->type, input->type); } TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); // Ensure that weights and inputs have the same channel dimension. @@ -406,6 +440,69 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { data->per_channel_output_shift.data(), channels_out)); } + if (input->type == kTfLiteFloat32 && weights->type == kTfLiteInt8) { + node->temporaries->data[data->input_quantized_index] = + data->input_quantized_id; + TfLiteTensor* input_quantized; + TF_LITE_ENSURE_OK( + context, GetTemporarySafe(context, node, data->input_quantized_index, + &input_quantized)); + input_quantized->type = kTfLiteInt8; + input_quantized->allocation_type = kTfLiteArenaRw; + if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { + TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims); + TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, + input_quantized_size)); + } + + node->temporaries->data[data->scaling_factors_index] = + data->scaling_factors_id; + TfLiteTensor* scaling_factors; + TF_LITE_ENSURE_OK( + context, GetTemporarySafe(context, node, data->scaling_factors_index, + &scaling_factors)); + scaling_factors->type = kTfLiteFloat32; + scaling_factors->allocation_type = kTfLiteArenaRw; + // Only one scale factor per batch is typically necessary. See optimized + // implementation for why we need to allocate for the height of the inputs + // flattened to 2D. 
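To make the sizing below concrete: an input of shape [2, 4, 4, 8] has 256 elements and the weights' last dimension gives `channels_in = 8`, so the temporaries are sized for 256 / 8 = 32 rows of the flattened [batch*h*w, channels_in] view, even though the hybrid Eval path only writes one scale factor and one offset per batch (2 in this example).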
+ const int channels_in = weights->dims->data[3]; + TF_LITE_ENSURE(context, channels_in != 0); + const int height = NumElements(input) / channels_in; + int scaling_dims[1] = {height}; + if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { + TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); + scaling_factors_size->data[0] = height; + TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, + scaling_factors_size)); + } + + const auto* affine_quantization = + reinterpret_cast( + weights->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + TF_LITE_ENSURE_EQ( + context, affine_quantization->scale->size, + weights->dims->data[affine_quantization->quantized_dimension]); + node->temporaries->data[data->input_offset_index] = data->input_offset_id; + TfLiteTensor* input_offsets; + TF_LITE_ENSURE_OK(context, + GetTemporarySafe(context, node, data->input_offset_index, + &input_offsets)); + input_offsets->type = kTfLiteInt32; + input_offsets->allocation_type = kTfLiteArenaRw; + // See above comment for the need to allocate for height of inputs. + TF_LITE_ENSURE(context, channels_in != 0); + const int input_offset_dims[1] = {height}; + if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, input_offset_dims)) { + TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1); + input_offsets_size->data[0] = input_offset_dims[0]; + TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets, + input_offsets_size)); + } + } + return kTfLiteOk; } @@ -617,6 +714,67 @@ void EvalQuantizedPerChannel16x8( } } +TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node, + const TfLiteTransposeConvParams* params, OpData* data, + const TfLiteTensor* input, const TfLiteTensor* weights, + const TfLiteTensor* bias, TfLiteTensor* output) { + float output_activation_min, output_activation_max; + CalculateActivationRange(params->activation, &output_activation_min, + &output_activation_max); + + const int batch_size = SizeOfDimension(input, 0); + TF_LITE_ENSURE(context, batch_size != 0); + const int input_size = NumElements(input) / batch_size; + TfLiteTensor* quantized_input_tensor; + TF_LITE_ENSURE_OK(context, + GetTemporarySafe(context, node, data->input_quantized_index, + &quantized_input_tensor)); + int8_t* quantized_input_ptr_batch = + GetTensorData(quantized_input_tensor); + TfLiteTensor* scaling_factors_tensor; + TF_LITE_ENSURE_OK(context, + GetTemporarySafe(context, node, data->scaling_factors_index, + &scaling_factors_tensor)); + float* scaling_factors_ptr = GetTensorData(scaling_factors_tensor); + TfLiteTensor* input_offset_tensor; + TF_LITE_ENSURE_OK(context, + GetTemporarySafe(context, node, data->input_offset_index, + &input_offset_tensor)); + int32_t* input_offset_ptr = GetTensorData(input_offset_tensor); + + for (int b = 0; b < batch_size; ++b) { + const int offset = b * input_size; + tensor_utils::AsymmetricQuantizeFloats( + GetTensorData(input) + offset, input_size, + quantized_input_ptr_batch + offset, &scaling_factors_ptr[b], + &input_offset_ptr[b]); + } + + const auto* affine_quantization = + reinterpret_cast(weights->quantization.params); + + tflite::ConvParams op_params; + op_params.padding_type = PaddingType::kSame; + op_params.padding_values.width = data->padding.width; + op_params.padding_values.height = data->padding.height; + op_params.padding_values.width_offset = data->padding.width_offset; + op_params.padding_values.height_offset = 
data->padding.height_offset; + op_params.stride_width = params->stride_width; + op_params.stride_height = params->stride_height; + op_params.float_activation_min = output_activation_min; + op_params.float_activation_max = output_activation_max; + + reference_ops::HybridTransposeConv( + op_params, scaling_factors_ptr, GetTensorShape(input), + quantized_input_ptr_batch, GetTensorShape(weights), + GetTensorData(weights), GetTensorShape(bias), + GetTensorData(bias), GetTensorShape(output), + GetTensorData(output), affine_quantization->scale->data, + input_offset_ptr); + + return kTfLiteOk; +} + template TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { // Retrieve tensors (All should be allocated by now) @@ -677,14 +835,19 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { // Currently support float32, uint8, int8, int16. switch (input->type) { case kTfLiteFloat32: { - // Only for GenericOptimized path, we use transposed weights. - if (data->weights_are_transposed) { - if (!IsConstantTensor(weights)) { - ResizeAndTransposeWeights(context, weights, transposed_weights); + if (weights->type == kTfLiteInt8) { + TF_LITE_ENSURE_OK(context, EvalHybrid(context, node, params, data, + input, weights, bias, output)); + } else { + // Only for GenericOptimized path, we use transposed weights. + if (data->weights_are_transposed) { + if (!IsConstantTensor(weights)) { + ResizeAndTransposeWeights(context, weights, transposed_weights); + } } + EvalFloat(context, params, data, input, weights, bias, + transposed_weights, col2im, output); } - EvalFloat(context, params, data, input, weights, bias, - transposed_weights, col2im, output); break; } case kTfLiteUInt8: { diff --git a/tensorflow/lite/kernels/transpose_conv_test.cc b/tensorflow/lite/kernels/transpose_conv_test.cc index 84dfd29dfb9945..bbe7d3f022c39b 100644 --- a/tensorflow/lite/kernels/transpose_conv_test.cc +++ b/tensorflow/lite/kernels/transpose_conv_test.cc @@ -1251,6 +1251,102 @@ TEST_P(TransposeConvOpTest, SimpleBiasTestQuantizedPerChannel16x8Bias64) { EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 3, 2})); } +class HybridTransposeConvOpModel + : public BaseTransposeConvBiasOpModel { + public: + using BaseTransposeConvBiasOpModel::BaseTransposeConvBiasOpModel; + + void SetFilter(std::initializer_list f) { + PerChannelSymmetricQuantizeAndPopulate(filter_, f); + } + + void SetBias(std::initializer_list b) { PopulateTensor(bias_, b); } + + std::vector GetOutput() { return ExtractVector(output_); } +}; + +TEST_P(TransposeConvOpTest, SimpleTestHybridInt8) { + const std::initializer_list filter_data = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + const std::initializer_list const_filter_data = {14, 28, 42, 56, 71, + 85, 99, 113, 127}; + HybridTransposeConvOpModel model( + /*registration=*/GetRegistration(), /*output_shape_data=*/{1, 4, 4, 1}, + /*filter=*/ + {TensorType_INT8, {1, 3, 3, 1}, 0, 0, 0, 0, true, {9.0 / 127}, {0}, 0}, + /*filter_data=*/const_filter_data, + /*input=*/{TensorType_FLOAT32, {1, 4, 4, 1}}, + /*output=*/{TensorType_FLOAT32, {}}, + /*padding=*/Padding_SAME, /*stride_w=*/1, /*stride_h=*/1, + /*fused_activation=*/ActivationFunctionType_NONE, + /*test_type=*/GetTestType(), + /*version=*/3, + /*bias_type=*/TensorType_FLOAT32); + model.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); + if (GetTestType() == TestType::kDynamic) { + model.SetFilter(filter_data); + } + + model.SetBias({1}); + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + // The values are taken from float model "SimpleTest". 
+ EXPECT_THAT(model.GetOutput(), ElementsAreArray(ArrayFloatNear( + {30, 63, 84, 76, 100, 193, 238, 199, 208, + 373, 417.5, 331, 263.7, 447, 486, 366.5}, + 0.19))); + + // GetOutputShape() should always be same as model.SetOutputShape(...); + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 4, 1})); +} + +TEST_P(TransposeConvOpTest, SimpleTestHybridInt8MultiChannel) { + const std::initializer_list filter_data = { + 1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6, 8, 10, 12, 14, 16, 18}; + const std::initializer_list const_filter_data = { + 7, 22, 37, 52, 67, 82, 97, 112, 127, + 14, 28, 42, 56, 71, 85, 99, 113, 127}; + HybridTransposeConvOpModel model( + /*registration=*/GetRegistration(), /*output_shape_data=*/{1, 5, 5, 2}, + /*filter=*/ + {TensorType_INT8, + {2, 3, 3, 1}, + 0, + 0, + 0, + 0, + true, + {17.0 / 127, 18.0 / 127}, + {0, 0}, + 0}, + /*filter_data=*/const_filter_data, + /*input=*/{TensorType_FLOAT32, {1, 2, 2, 1}}, + /*output=*/{TensorType_FLOAT32, {}}, + /*padding=*/Padding_VALID, /*stride_w=*/2, /*stride_h=*/2, + /*fused_activation=*/ActivationFunctionType_NONE, + /*test_type=*/GetTestType(), + /*version=*/3, + /*bias_type=*/TensorType_FLOAT32); + + model.SetInput({1, 2, 3, 4}); + if (GetTestType() == TestType::kDynamic) { + model.SetFilter(filter_data); + } + model.SetBias({3, 4}); + + ASSERT_EQ(model.Invoke(), kTfLiteOk); + + // The values are taken from float model "MultiChannelBiasTest". + EXPECT_THAT( + model.GetOutput(), + ElementsAreArray(ArrayFloatNear( + {4, 6, 6, 8, 10, 14, 9, 12, 13, 16, 10, 12, 12, + 14, 28, 32, 21, 24, 25, 28, 19, 24, 27, 32, 64.5, 76, + 44.5, 52, 56.5, 63.5, 24, 28, 30, 34, 63.5, 72, 39, 44, 47, + 52, 42, 46, 48, 52, 106, 114, 63, 68, 71, 76}, + 0.26))); + EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 5, 5, 2})); +} + INSTANTIATE_TEST_SUITE_P( TransposeConvOpTest, TransposeConvOpTest, ::testing::Combine( diff --git a/tensorflow/lite/python/lite_v2_test.py b/tensorflow/lite/python/lite_v2_test.py index 9a9a502b829e35..687c87d03108a8 100644 --- a/tensorflow/lite/python/lite_v2_test.py +++ b/tensorflow/lite/python/lite_v2_test.py @@ -391,9 +391,19 @@ def _createV2QATSavedModelWithFloatOpsAtEnd(self): """Create a simple QAT SavedModel that includes float ops at the end.""" saved_model_dir = os.path.join(self.get_temp_dir(), 'qat_float_ops_at_end') input_tensor = tf.keras.layers.Input((32, 32, 128)) - x = tf.quantization.fake_quant_with_min_max_args(input_tensor, -3.0, 3.0) + + class _FakeQuantArgsLayer(tf.keras.layers.Layer): + """A fake quantization layer with fake_quant_with_min_max_args. + + Keras 3 requires wrapping the tf function inside Keras layer. + """ + + def call(self, x): + return tf.quantization.fake_quant_with_min_max_args(x, -3.0, 3.0) + + x = _FakeQuantArgsLayer()(input_tensor) x = tf.keras.layers.Conv2D(1, (3, 3))(x) - x = tf.quantization.fake_quant_with_min_max_args(x, -3.0, 3.0) + x = _FakeQuantArgsLayer()(x) # Exclude the quantization of the following Dense layer by not putting # fake quant layer after the dense layer. output_tensor = tf.keras.layers.Dense(1, activation='sigmoid')(x) @@ -1607,10 +1617,19 @@ def _createV2QATSavedModel(self, shape): input_name = 'input' output_name = 'scores' + class _FakeQuantArgsLayer(tf.keras.layers.Layer): + """A fake quantization layer with fake_quant_with_min_max_args. + + Keras 3 requires wrapping the tf function inside Keras layer. 
+ """ + + def call(self, x): + return tf.quantization.fake_quant_with_min_max_args(x, -3.0, 3.0) + input_tensor = tf.keras.layers.Input((32, 32, 128), name=input_name) - x = tf.quantization.fake_quant_with_min_max_args(input_tensor, -3.0, 3.0) + x = _FakeQuantArgsLayer()(input_tensor) x = tf.keras.layers.Conv2D(1, (3, 3))(x) - x = tf.quantization.fake_quant_with_min_max_args(x, -3.0, 3.0) + x = _FakeQuantArgsLayer()(x) scores = tf.keras.layers.Reshape((-1,), name=output_name)(x) model = tf.keras.Model(input_tensor, scores) model.save(saved_model_dir) @@ -2679,9 +2698,18 @@ def testKerasFullyConnectedOutputShape3D(self): batch_size=1, shape=[3, 3], name='input_tensor', dtype=tf.float32 ) - x = tf.quantization.fake_quant_with_min_max_args(input_tensor, -3.0, 3.0) + class _FakeQuantArgsLayer(tf.keras.layers.Layer): + """A fake quantization layer with fake_quant_with_min_max_args. + + Keras 3 requires wrapping the tf function inside Keras layer. + """ + + def call(self, x): + return tf.quantization.fake_quant_with_min_max_args(x, -3.0, 3.0) + + x = _FakeQuantArgsLayer()(input_tensor) x = tf.keras.layers.Dense(3)(x) - x = tf.quantization.fake_quant_with_min_max_args(x, -3.0, 3.0) + x = _FakeQuantArgsLayer()(x) model = tf.keras.Model(input_tensor, x) model.compile( @@ -2736,14 +2764,20 @@ def call(self, inputs): inputs, filters, [*inputs.shape[:-1], 24], 1 ) + class _FakeQuantVarsLayer(tf.keras.layers.Layer): + """A fake quantization layer with fake_quant_with_min_max_vars. + + Keras 3 requires wrapping the tf function inside Keras layer. + """ + + def call(self, x): + return tf.quantization.fake_quant_with_min_max_vars( + x, -3.0, 3.0, narrow_range=True) + inp = tf.keras.Input(shape=(6, 8, 48), batch_size=1) - x = tf.quantization.fake_quant_with_min_max_vars( - inp, -3.0, 3.0, narrow_range=True - ) + x = _FakeQuantVarsLayer()(inp) x = QuantConv2DTransposed()(x) - x = tf.quantization.fake_quant_with_min_max_vars( - x, -3.0, 3.0, narrow_range=True - ) + x = _FakeQuantVarsLayer()(x) model = tf.keras.Model(inp, x) @@ -3512,10 +3546,18 @@ def call(self, inputs): result, -3.0, 3.0, narrow_range=True ) + class _FakeQuantVarsLayer(tf.keras.layers.Layer): + """A fake quantization layer with fake_quant_with_min_max_vars. + + Keras 3 requires wrapping the tf function inside Keras layer. 
+ """ + + def call(self, x): + return tf.quantization.fake_quant_with_min_max_vars( + x, -3.0, 3.0, narrow_range=True) + inp = tf.keras.Input(shape=(6, 8, 6), batch_size=1) - x = tf.quantization.fake_quant_with_min_max_vars( - inp, -3.0, 3.0, narrow_range=True - ) + x = _FakeQuantVarsLayer()(inp) x = QuantConv2DTransposedWithBiasAndActivation()(x) model = tf.keras.Model(inp, x) diff --git a/tensorflow/lite/python/util_test.py b/tensorflow/lite/python/util_test.py index 3a8b212115c376..5c09fd47171bd8 100644 --- a/tensorflow/lite/python/util_test.py +++ b/tensorflow/lite/python/util_test.py @@ -274,7 +274,7 @@ def _generate_integer_tflite_model(quantization_type=dtypes.int8, # Convert TF Model to an Integer Quantized TFLite Model converter = tf.lite.TFLiteConverter.from_keras_model(model) else: - model.save(saved_model_dir) + tf.saved_model.save(model, saved_model_dir) converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) converter.optimizations = {tf.lite.Optimize.DEFAULT} diff --git a/tensorflow/lite/schema/BUILD b/tensorflow/lite/schema/BUILD index fafd7a24e0edb7..29d09105e59950 100644 --- a/tensorflow/lite/schema/BUILD +++ b/tensorflow/lite/schema/BUILD @@ -122,7 +122,7 @@ flatbuffer_cc_library( # Generic schema for flatbuffer converter (but with mutable makes bigger). flatbuffer_cc_library( name = "schema_fbs_with_mutable", - srcs = ["schema.fbs"], + srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"], compatible_with = get_compatible_with_portable(), flatc_args = [ "--gen-mutable", @@ -134,7 +134,7 @@ flatbuffer_cc_library( # Generic schema for inference on device (but with reflections makes bigger). flatbuffer_cc_library( name = "schema_fbs_with_reflection", - srcs = ["schema.fbs"], + srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"], compatible_with = get_compatible_with_portable(), flatc_args = [ "--reflect-types", diff --git a/tensorflow/lite/tflite_with_xnnpack.cc b/tensorflow/lite/tflite_with_xnnpack.cc index 22e8617ec74e21..d443d404c21f05 100644 --- a/tensorflow/lite/tflite_with_xnnpack.cc +++ b/tensorflow/lite/tflite_with_xnnpack.cc @@ -23,6 +23,10 @@ namespace tflite { std::unique_ptr AcquireXNNPACKDelegate() { auto opts = TfLiteXNNPackDelegateOptionsDefault(); +#ifdef TFLITE_XNNPACK_DELEGATE_EXPERIMENTAL_WEIGHT_CACHE_FILE_PATH + opts.experimental_weight_cache_file_path = + TFLITE_XNNPACK_DELEGATE_EXPERIMENTAL_WEIGHT_CACHE_FILE_PATH; +#endif return std::unique_ptr( TfLiteXNNPackDelegateCreate(&opts), TfLiteXNNPackDelegateDelete); } diff --git a/tensorflow/lite/tools/benchmark/README.md b/tensorflow/lite/tools/benchmark/README.md index aa607ea952e682..f25da51705d6b8 100644 --- a/tensorflow/lite/tools/benchmark/README.md +++ b/tensorflow/lite/tools/benchmark/README.md @@ -250,6 +250,11 @@ delegate first, and then the XNNPACK delegate secondly. ## To build/install/run +Note: The benchmarking tool must be compiled with a TFLite runtime that +supports the ops found in the model to be tested.
+If Tensorflow Ops ("flex ops") +or other custom ops are used in the model, please see the section [below](#build-the-benchmark-tool-with-tensorflow-ops-support). + ### On Android: (0) Refer to https://www.tensorflow.org/lite/guide/build_android to edit the @@ -308,8 +313,8 @@ adb shell /data/local/tmp/benchmark_model \ bazel build -c opt tensorflow/lite/tools/benchmark:benchmark_model ``` -(2) Run on your compute graph, similar to the Android case but without the need of adb shell. -For example: +(2) Run on your compute graph, similar to the Android case but without the need +of adb shell. For example: ``` bazel-bin/tensorflow/lite/tools/benchmark/benchmark_model \ @@ -442,12 +447,19 @@ some additional parameters as detailed below. ## Build the benchmark tool with Tensorflow ops support -You can build the benchmark tool with [Tensorflow operators support](https://www.tensorflow.org/lite/guide/ops_select). +If you see an error that says: `ERROR: Select TensorFlow op(s), included in the +given model, is(are) not supported by this interpreter.` you will need to +build with [Tensorflow operators support](https://www.tensorflow.org/lite/guide/ops_select). + +Having Tensorflow ops in the TFLite file works when the benchmark tool is built +with Tensorflow ops support. It doesn't require any additional option to use it. ### How to build -To build the tool, you need to use 'benchmark_model_plus_flex' target with -'--config=monolithic' option. +To build the tool, you need to use the `benchmark_model_plus_flex` target with +the `--config=monolithic` flag. + +**Desktop** ``` bazel build -c opt \ @@ -455,12 +467,53 @@ bazel build -c opt \ tensorflow/lite/tools/benchmark:benchmark_model_plus_flex ``` +**Android** + +``` +bazel build -c opt \ + --config=monolithic --config=android_arm64 \ + tensorflow/lite/tools/benchmark:benchmark_model_plus_flex +``` + ### How to benchmark tflite model with Tensorflow ops -Tensorflow ops support just works the benchmark tool is built with Tensorflow -ops support. It doesn't require any additional option to use it. +Follow the further instructions [above](#to-buildinstallrun) replacing +`benchmark_model` with the `benchmark_model_plus_flex` file created here. + +For example, on desktop it's very easy: ``` bazel-bin/tensorflow/lite/tools/benchmark/benchmark_model_plus_flex \ --graph=model_converted_with_TF_ops.tflite \ ``` + +## Build the benchmark tool with Custom ops support + +If you see an error that says `ERROR: Op type not registered 'XXXXXXXX' +in binary running on localhost.` for custom ops running in your TFLite model, +you will need to manually build the tool to include your libraries providing +the custom ops. + +### How to build + +While possible, this is not necessarily supported. + +However, you should be able to create a new `cc_binary` rule that depends on +`tensorflow/lite/tools/benchmark:benchmark_model_main` along with your custom op +rules. + +``` +cc_binary( + name = "benchmark_model_plus_custom_ops", + deps = [ + ":my_custom_ops_provider", + "//tensorflow/lite/tools/benchmark:benchmark_model_main", + ], +) +``` + +### How to benchmark tflite model with Custom ops + +Use the `benchmark_model_plus_custom_ops` (or whatever) file created by your +custom rule instead of the `benchmark_model` file in the instructions, +[above](#to-buildinstallrun). 
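Editorial example for the benchmark README additions above: they assume a `.tflite` file that actually embeds TensorFlow ("flex") ops. A minimal sketch of how such a file is typically produced, assuming any Keras model (the tiny `Dense` model here is only a stand-in) and an output filename matching the README's example:

```
import tensorflow as tf

# Stand-in model; any Keras model or SavedModel can be converted the same way.
inputs = tf.keras.Input(shape=(4,))
outputs = tf.keras.layers.Dense(1)(inputs)
model = tf.keras.Model(inputs, outputs)

converter = tf.lite.TFLiteConverter.from_keras_model(model)
# Allow TensorFlow ("flex") ops to be kept in the .tflite file in addition to
# the TFLite builtin ops; ops without a builtin lowering stay as TF ops.
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,
    tf.lite.OpsSet.SELECT_TF_OPS,
]
tflite_model = converter.convert()

with open("model_converted_with_TF_ops.tflite", "wb") as f:
    f.write(tflite_model)
```

A file produced this way is what the flex-enabled `benchmark_model_plus_flex` binary described above can execute, while the plain `benchmark_model` build would report unsupported select TF ops.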
diff --git a/tensorflow/lite/tools/cmake/modules/flatbuffers.cmake b/tensorflow/lite/tools/cmake/modules/flatbuffers.cmake index a768574923932a..8febb1fbd1c706 100644 --- a/tensorflow/lite/tools/cmake/modules/flatbuffers.cmake +++ b/tensorflow/lite/tools/cmake/modules/flatbuffers.cmake @@ -23,8 +23,9 @@ OverridableFetchContent_Declare( flatbuffers GIT_REPOSITORY https://github.com/google/flatbuffers # Sync with tensorflow/third_party/flatbuffers/workspace.bzl - GIT_TAG v24.3.25 - GIT_SHALLOW TRUE + GIT_TAG e6463926479bd6b330cbcf673f7e917803fd5831 + # NOTE: b/340264458 - `GIT_SHALLOW TRUE` works for tag name only. + GIT_SHALLOW FALSE GIT_PROGRESS TRUE SOURCE_DIR "${CMAKE_BINARY_DIR}/flatbuffers" ) diff --git a/tensorflow/lite/tools/delegates/BUILD b/tensorflow/lite/tools/delegates/BUILD index a6ba60436ebcee..97ab1d7b0787b1 100644 --- a/tensorflow/lite/tools/delegates/BUILD +++ b/tensorflow/lite/tools/delegates/BUILD @@ -173,6 +173,20 @@ cc_library_with_tflite( alwayslink = 1, ) +cc_test( + name = "xnnpack_delegate_provider_test", + srcs = ["xnnpack_delegate_provider_test.cc"], + copts = tflite_copts(), + visibility = ["//visibility:public"], + deps = [ + ":delegate_provider_hdr", + ":xnnpack_delegate_provider", + "//tensorflow/lite/delegates/xnnpack:xnnpack_delegate", + "//tensorflow/lite/tools:tool_params", + "@com_google_googletest//:gtest_main", + ], +) + cc_library( name = "external_delegate_provider", srcs = ["external_delegate_provider.cc"], diff --git a/tensorflow/lite/tools/delegates/xnnpack_delegate_provider.cc b/tensorflow/lite/tools/delegates/xnnpack_delegate_provider.cc index 7d0ef212966397..9a9f7236991438 100644 --- a/tensorflow/lite/tools/delegates/xnnpack_delegate_provider.cc +++ b/tensorflow/lite/tools/delegates/xnnpack_delegate_provider.cc @@ -27,6 +27,8 @@ class XnnpackDelegateProvider : public DelegateProvider { default_params_.AddParam("use_xnnpack", ToolParam::Create(false)); default_params_.AddParam("xnnpack_force_fp16", ToolParam::Create(false)); + default_params_.AddParam("xnnpack_experimental_weight_cache_file_path", + ToolParam::Create("")); } std::vector CreateFlags(ToolParams* params) const final; @@ -54,6 +56,8 @@ std::vector XnnpackDelegateProvider::CreateFlags( "false explicitly."), CreateFlag("xnnpack_force_fp16", params, "enforce float16 inference."), + CreateFlag("xnnpack_experimental_weight_cache_file_path", + params, "enable file-backed weight caching."), }; return flags; } @@ -63,6 +67,9 @@ void XnnpackDelegateProvider::LogParams(const ToolParams& params, LOG_TOOL_PARAM(params, bool, "use_xnnpack", "Use xnnpack", verbose); LOG_TOOL_PARAM(params, bool, "xnnpack_force_fp16", "xnnpack_force_fp16", verbose); + LOG_TOOL_PARAM(params, std::string, + "xnnpack_experimental_weight_cache_file_path", + "xnnpack_experimental_weight_cache_file_path", verbose); } TfLiteDelegatePtr XnnpackDelegateProvider::CreateTfLiteDelegate( @@ -70,7 +77,9 @@ TfLiteDelegatePtr XnnpackDelegateProvider::CreateTfLiteDelegate( if (params.Get("use_xnnpack")) { return evaluation::CreateXNNPACKDelegate( params.Get("num_threads"), - params.Get("xnnpack_force_fp16")); + params.Get("xnnpack_force_fp16"), + params.Get("xnnpack_experimental_weight_cache_file_path") + .c_str()); } return CreateNullDelegate(); } diff --git a/tensorflow/lite/tools/delegates/xnnpack_delegate_provider_test.cc b/tensorflow/lite/tools/delegates/xnnpack_delegate_provider_test.cc new file mode 100644 index 00000000000000..19fd593fa51136 --- /dev/null +++ 
b/tensorflow/lite/tools/delegates/xnnpack_delegate_provider_test.cc @@ -0,0 +1,82 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include +#include + +#include +#include +#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h" +#include "tensorflow/lite/tools/delegates/delegate_provider.h" +#include "tensorflow/lite/tools/tool_params.h" + +namespace tflite { +namespace tools { +namespace { + +TEST(XNNPackDelegateProviderTest, Test) { + const char kFakeCacheParam[] = "/tmp/random/path"; + + const auto& providers = GetRegisteredDelegateProviders(); + ASSERT_EQ(providers.size(), 1); + ToolParams params; + + const auto& xnnpack_provider = providers[0]; + ASSERT_NE(xnnpack_provider, nullptr); + + params.Merge(xnnpack_provider->DefaultParams()); + params.AddParam("num_threads", ToolParam::Create(-1)); + + EXPECT_TRUE(params.HasParam("use_xnnpack")); + EXPECT_FALSE(params.HasValueSet("use_xnnpack")); + ASSERT_NE(params.GetParam("use_xnnpack"), nullptr); + + EXPECT_TRUE(params.HasParam("xnnpack_force_fp16")); + EXPECT_FALSE(params.HasValueSet("xnnpack_force_fp16")); + ASSERT_NE(params.GetParam("xnnpack_force_fp16"), nullptr); + + EXPECT_TRUE(params.HasParam("xnnpack_experimental_weight_cache_file_path")); + EXPECT_FALSE(params.HasValueSet( + "xnnpack_experimental_weight_cache_file_path")); + ASSERT_NE(params.GetParam("xnnpack_experimental_weight_cache_file_path"), + nullptr); + + params.Set("use_xnnpack", true, /*position=*/0); + + { + TfLiteDelegatePtr delegate = xnnpack_provider->CreateTfLiteDelegate(params); + const TfLiteXNNPackDelegateOptions* options = + TfLiteXNNPackDelegateGetOptions(delegate.get()); + ASSERT_NE(options, nullptr); + EXPECT_EQ(options->experimental_weight_cache_file_path, nullptr); + } + + params.Set("xnnpack_force_fp16", true, /*position=*/1); + params.Set("xnnpack_experimental_weight_cache_file_path", + kFakeCacheParam, /*position=*/2); + { + TfLiteDelegatePtr delegate = xnnpack_provider->CreateTfLiteDelegate(params); + const TfLiteXNNPackDelegateOptions* options = + TfLiteXNNPackDelegateGetOptions(delegate.get()); + ASSERT_NE(options, nullptr); + EXPECT_THAT(options->experimental_weight_cache_file_path, + testing::StrEq(kFakeCacheParam)); + EXPECT_TRUE(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16); + } +} + +} // namespace +} // namespace tools +} // namespace tflite diff --git a/tensorflow/lite/tools/evaluation/BUILD b/tensorflow/lite/tools/evaluation/BUILD index 914c33471616e8..a095d0934fa4bf 100644 --- a/tensorflow/lite/tools/evaluation/BUILD +++ b/tensorflow/lite/tools/evaluation/BUILD @@ -75,7 +75,9 @@ cc_library_with_stable_tflite_abi( "//tensorflow/lite/acceleration/configuration/c:xnnpack_plugin", ], }], - deps = select({ + deps = [ + "@flatbuffers", + ] + select({ "//tensorflow/lite:tflite_with_xnnpack_explicit_false": [], "//conditions:default": [ "//tensorflow/lite/acceleration/configuration:configuration_fbs", 
diff --git a/tensorflow/lite/tools/evaluation/stages/utils/image_metrics_test.cc b/tensorflow/lite/tools/evaluation/stages/utils/image_metrics_test.cc index 11d71d663fff22..5e95165081781c 100644 --- a/tensorflow/lite/tools/evaluation/stages/utils/image_metrics_test.cc +++ b/tensorflow/lite/tools/evaluation/stages/utils/image_metrics_test.cc @@ -20,7 +20,6 @@ limitations under the License. #include #include -#include #include namespace tflite { diff --git a/tensorflow/lite/tools/evaluation/utils.cc b/tensorflow/lite/tools/evaluation/utils.cc index 20221df8d37c91..6628911f79441e 100644 --- a/tensorflow/lite/tools/evaluation/utils.cc +++ b/tensorflow/lite/tools/evaluation/utils.cc @@ -15,7 +15,19 @@ limitations under the License. #include "tensorflow/lite/tools/evaluation/utils.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "flatbuffers/buffer.h" // from @flatbuffers +#include "flatbuffers/string.h" // from @flatbuffers #include "tensorflow/lite/tools/delegates/delegate_provider.h" + #if defined(__APPLE__) #include "TargetConditionals.h" #if (TARGET_OS_IPHONE && !TARGET_IPHONE_SIMULATOR) || \ @@ -31,6 +43,7 @@ limitations under the License. #include "tensorflow/lite/acceleration/configuration/c/xnnpack_plugin.h" #include "tensorflow/lite/acceleration/configuration/configuration_generated.h" #include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h" #endif // !defined(TFLITE_WITHOUT_XNNPACK) #if !defined(_WIN32) @@ -38,11 +51,6 @@ limitations under the License. #endif #include -#include -#include -#include -#include - namespace tflite { namespace evaluation { @@ -177,7 +185,9 @@ TfLiteDelegatePtr CreateHexagonDelegate( #endif // TFLITE_ENABLE_HEXAGON #ifdef TFLITE_WITHOUT_XNNPACK -TfLiteDelegatePtr CreateXNNPACKDelegate(int num_threads, bool force_fp16) { +TfLiteDelegatePtr CreateXNNPACKDelegate( + int num_threads, bool force_fp16, + const char* experimental_weight_cache_file_path) { return tools::CreateNullDelegate(); } #else // !defined(TFLITE_WITHOUT_XNNPACK) @@ -212,6 +222,12 @@ TfLiteDelegatePtr CreateXNNPACKDelegate() { TfLiteDelegatePtr CreateXNNPACKDelegate( const TfLiteXNNPackDelegateOptions* xnnpack_options) { flatbuffers::FlatBufferBuilder flatbuffer_builder; + flatbuffers::Offset experimental_weight_cache_file_path; + if (xnnpack_options->experimental_weight_cache_file_path) { + experimental_weight_cache_file_path = flatbuffer_builder.CreateString( + xnnpack_options->experimental_weight_cache_file_path); + } + tflite::XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder); int num_threads = xnnpack_options->num_threads; if (num_threads >= 0) { @@ -220,6 +236,8 @@ TfLiteDelegatePtr CreateXNNPACKDelegate( xnnpack_settings_builder.fbb_.AddElement( XNNPackSettings::VT_FLAGS, static_cast(xnnpack_options->flags), 0); + xnnpack_settings_builder.add_experimental_weight_cache_file_path( + experimental_weight_cache_file_path); flatbuffers::Offset xnnpack_settings = xnnpack_settings_builder.Finish(); tflite::TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder); @@ -240,7 +258,9 @@ TfLiteDelegatePtr CreateXNNPACKDelegate( return TfLiteDelegatePtr(delegate, delegate_deleter); } -TfLiteDelegatePtr CreateXNNPACKDelegate(int num_threads, bool force_fp16) { +TfLiteDelegatePtr CreateXNNPACKDelegate( + int num_threads, bool force_fp16, + const char* experimental_weight_cache_file_path) { auto opts = XNNPackDelegateOptionsDefault(); // Note that we don't want to use the thread pool for 
num_threads == 1. opts.num_threads = num_threads > 1 ? num_threads : 0; @@ -248,6 +268,12 @@ TfLiteDelegatePtr CreateXNNPACKDelegate(int num_threads, bool force_fp16) { TFLITE_LOG(INFO) << "XNNPack FP16 inference enabled."; opts.flags |= TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16; } + if (experimental_weight_cache_file_path && + experimental_weight_cache_file_path[0] != '\0') { + TFLITE_LOG(INFO) << "XNNPack file-backed weight cache enabled."; + opts.experimental_weight_cache_file_path = + experimental_weight_cache_file_path; + } return CreateXNNPACKDelegate(&opts); } #endif diff --git a/tensorflow/lite/tools/evaluation/utils.h b/tensorflow/lite/tools/evaluation/utils.h index 1735b9ee08a0fc..ed89f609770e4e 100644 --- a/tensorflow/lite/tools/evaluation/utils.h +++ b/tensorflow/lite/tools/evaluation/utils.h @@ -105,7 +105,9 @@ TfLiteDelegatePtr CreateXNNPACKDelegate(); TfLiteDelegatePtr CreateXNNPACKDelegate( const TfLiteXNNPackDelegateOptions* options); #endif // !defined(TFLITE_WITHOUT_XNNPACK) -TfLiteDelegatePtr CreateXNNPACKDelegate(int num_threads, bool force_fp16); +TfLiteDelegatePtr CreateXNNPACKDelegate( + int num_threads, bool force_fp16, + const char* experimental_weight_cache_file_path = nullptr); TfLiteDelegatePtr CreateCoreMlDelegate(); } // namespace evaluation diff --git a/tensorflow/lite/tools/tool_params.h b/tensorflow/lite/tools/tool_params.h index 70a16ccd2a73f6..09f64797f3eebf 100644 --- a/tensorflow/lite/tools/tool_params.h +++ b/tensorflow/lite/tools/tool_params.h @@ -94,7 +94,7 @@ class TypedToolParam : public ToolParam { has_value_set_ = true; } - T Get() const { return value_; } + const T& Get() const { return value_; } void Set(const ToolParam& other) override { Set(other.AsConstTyped()->Get()); diff --git a/tensorflow/lite/tools/versioning/gpu_compatibility.cc b/tensorflow/lite/tools/versioning/gpu_compatibility.cc index 29d5e27c4266c3..c5b50ac728cdad 100644 --- a/tensorflow/lite/tools/versioning/gpu_compatibility.cc +++ b/tensorflow/lite/tools/versioning/gpu_compatibility.cc @@ -435,6 +435,41 @@ absl::Status CheckCustomOpsGpuDelegateCompatibility(const OpSignature& op_sig) { absl::StrCat("Not supported custom op ", op_sig.custom_name)); } +absl::Status CheckAddMulBroadcastCompatibility( + const OpSignatureTensorSpec& input0, const OpSignatureTensorSpec& input1) { + if (input0.dims.size() > 1 && input1.dims.size() > 1 && + input0.dims.size() != input1.dims.size()) { + const std::vector*longer_dims, *shorter_dims; + if (input0.dims.size() >= input1.dims.size()) { + longer_dims = &input0.dims; + shorter_dims = &input1.dims; + } else { + longer_dims = &input1.dims; + shorter_dims = &input0.dims; + } + bool is_broadcastable = false; + + if (longer_dims->size() == 4 && shorter_dims->size() == 3 && + longer_dims->at(0) == 1) { + // Broadcasting 3D to 4D with batch 1 works. + is_broadcastable = true; + } else if (longer_dims->size() == 4 && shorter_dims->size() == 2 && + longer_dims->at(0) == 1 && shorter_dims->at(0) == 1 && + shorter_dims->at(1) == 1) { + // Broadcasting 2D [1, 1] to 4D [1, x, y, z] works. 
+ is_broadcastable = true; + } + + if (!is_broadcastable) { + return absl::UnimplementedError( + absl::StrCat("Doesn't support broadcasting - input0: [", + absl::StrJoin(input0.dims, ","), "], input1: [", + absl::StrJoin(input1.dims, ","), "]")); + } + } + return absl::OkStatus(); +} + } // namespace // Logics here used to be in TFLiteOperationParser:IsSupported() @@ -449,12 +484,9 @@ absl::Status CheckGpuDelegateCompatibility(const OpSignature& op_sig) { } const auto& input0 = op_sig.inputs.at(0); const auto& input1 = op_sig.inputs.at(1); - if (input0.dims.size() > 1 && input1.dims.size() > 1 && - input0.dims.size() != input1.dims.size()) { - return absl::UnimplementedError( - absl::StrCat("ADD doesn't support broadcasting - input0: [", - absl::StrJoin(input0.dims, ","), "], input1: [", - absl::StrJoin(input1.dims, ","), "]")); + auto broadcastable = CheckAddMulBroadcastCompatibility(input0, input1); + if (!broadcastable.ok()) { + return broadcastable; } const TfLiteAddParams* tf_options; return RetrieveBuiltinData(op_sig, &tf_options); @@ -700,11 +732,13 @@ absl::Status CheckGpuDelegateCompatibility(const OpSignature& op_sig) { "MUL requires one tensor that not less than second in all " "dimensions."); } - } else if (input0.dims.size() > 1 && input1.dims.size() > 1) { - return absl::UnimplementedError( - absl::StrCat("MUL doesn't support broadcasting - input0: [", - absl::StrJoin(input0.dims, ","), "], input1: [", - absl::StrJoin(input1.dims, ","), "]")); + } else { + const auto& input0 = op_sig.inputs.at(0); + const auto& input1 = op_sig.inputs.at(1); + auto broadcastable = CheckAddMulBroadcastCompatibility(input0, input1); + if (!broadcastable.ok()) { + return broadcastable; + } } const TfLiteMulParams* tf_options; RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options)); diff --git a/tensorflow/lite/tools/versioning/gpu_compatibility_test.cc b/tensorflow/lite/tools/versioning/gpu_compatibility_test.cc index f3ca69dfc7e98d..c5283e8e4adfab 100644 --- a/tensorflow/lite/tools/versioning/gpu_compatibility_test.cc +++ b/tensorflow/lite/tools/versioning/gpu_compatibility_test.cc @@ -94,7 +94,7 @@ TEST(CheckGpuDelegateCompatibility, FCConstInput) { "FullyConnected doesn't support constant input."); } -TEST(CheckGpuDelegateCompatibility, Add1DBroadCastSuccess) { +TEST(CheckGpuDelegateCompatibility, Add1Dto3DBroadcastSuccess) { OpSignature op_sig = OpSignature(); op_sig.op = BuiltinOperator_ADD; auto params = std::make_unique(); @@ -108,20 +108,82 @@ TEST(CheckGpuDelegateCompatibility, Add1DBroadCastSuccess) { EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty()); } -TEST(CheckGpuDelegateCompatibility, Add2DBroadCastFail) { +TEST(CheckGpuDelegateCompatibility, Add2Dto3DBroadcastFail) { OpSignature op_sig = OpSignature(); op_sig.op = BuiltinOperator_ADD; auto params = std::make_unique(); op_sig.builtin_data = static_cast(params.get()); op_sig.inputs = std::vector(2); op_sig.inputs[0] = OpSignatureTensorSpec(); - op_sig.inputs[0].dims = {4, 1, 2}; + op_sig.inputs[0].dims = {1, 100, 256}; + op_sig.inputs[1] = OpSignatureTensorSpec(); + op_sig.inputs[1].dims = {100, 256}; + + EXPECT_EQ(CheckGpuDelegateCompatibility(op_sig).message(), + "Doesn't support broadcasting - input0: [1,100,256], input1: " + "[100,256]"); +} + +TEST(CheckGpuDelegateCompatibility, Add3Dto4DBroadcastFail) { + OpSignature op_sig = OpSignature(); + op_sig.op = BuiltinOperator_ADD; + auto params = std::make_unique(); + op_sig.builtin_data = static_cast(params.get()); + op_sig.inputs = std::vector(2); + 
op_sig.inputs[0] = OpSignatureTensorSpec(); + op_sig.inputs[0].dims = {4, 1, 1, 2}; op_sig.inputs[1] = OpSignatureTensorSpec(); - op_sig.inputs[1].dims = {2, 2}; + // Can't broadcast using batch of 4 + op_sig.inputs[1].dims = {1, 1, 2}; EXPECT_EQ( CheckGpuDelegateCompatibility(op_sig).message(), - "ADD doesn't support broadcasting - input0: [4,1,2], input1: [2,2]"); + "Doesn't support broadcasting - input0: [4,1,1,2], input1: [1,1,2]"); +} + +TEST(CheckGpuDelegateCompatibility, Add3Dto4DBroadcastSuccess) { + OpSignature op_sig = OpSignature(); + op_sig.op = BuiltinOperator_ADD; + auto params = std::make_unique(); + op_sig.builtin_data = static_cast(params.get()); + op_sig.inputs = std::vector(2); + op_sig.inputs[0] = OpSignatureTensorSpec(); + op_sig.inputs[0].dims = {1, 128, 513, 3}; + op_sig.inputs[1] = OpSignatureTensorSpec(); + // Can be broadcasted to {1, 128, 513, 3} + op_sig.inputs[1].dims = {128, 513, 3}; + + EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty()); +} + +TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess) { + OpSignature op_sig = OpSignature(); + op_sig.op = BuiltinOperator_ADD; + auto params = std::make_unique(); + op_sig.builtin_data = static_cast(params.get()); + op_sig.inputs = std::vector(2); + op_sig.inputs[0] = OpSignatureTensorSpec(); + op_sig.inputs[0].dims = {1, 512, 512, 1}; + op_sig.inputs[1] = OpSignatureTensorSpec(); + // Can be broadcasted to {1, 1, 1, 1} + op_sig.inputs[1].dims = {1, 1}; + + EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty()); +} + +TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess2) { + OpSignature op_sig = OpSignature(); + op_sig.op = BuiltinOperator_ADD; + auto params = std::make_unique(); + op_sig.builtin_data = static_cast(params.get()); + op_sig.inputs = std::vector(2); + op_sig.inputs[0] = OpSignatureTensorSpec(); + op_sig.inputs[0].dims = {1, 384, 384, 3}; + op_sig.inputs[1] = OpSignatureTensorSpec(); + // Can be broadcasted to {1, 1, 1, 1} + op_sig.inputs[1].dims = {1, 1}; + + EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty()); } } // namespace tflite diff --git a/tensorflow/opensource_only.files b/tensorflow/opensource_only.files index b2645a331739e3..49adcabcd51766 100644 --- a/tensorflow/opensource_only.files +++ b/tensorflow/opensource_only.files @@ -101,6 +101,7 @@ tf_staging/tensorflow/lite/delegates/utils/experimental/stable_delegate/BUILD: tf_staging/tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.cc: tf_staging/tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h: tf_staging/tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader_test.cc: +tf_staging/tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h: tf_staging/tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h: tf_staging/tensorflow/lite/experimental/acceleration/mini_benchmark/c/c_api.h: tf_staging/tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg.h: @@ -299,15 +300,16 @@ tf_staging/third_party/py/BUILD.tpl: tf_staging/third_party/py/BUILD: tf_staging/third_party/py/ml_dtypes/BUILD: tf_staging/third_party/py/ml_dtypes/LICENSE: -tf_staging/third_party/py/non_hermetic/BUILD.tpl: -tf_staging/third_party/py/non_hermetic/BUILD: -tf_staging/third_party/py/non_hermetic/README: tf_staging/third_party/py/non_hermetic/ml_dtypes/BUILD: tf_staging/third_party/py/non_hermetic/ml_dtypes/LICENSE: tf_staging/third_party/py/non_hermetic/numpy/BUILD: 
-tf_staging/third_party/py/non_hermetic/python_configure.bzl: tf_staging/third_party/py/numpy/BUILD: tf_staging/third_party/py/python_configure.bzl: +tf_staging/third_party/py/python_init_pip.bzl: +tf_staging/third_party/py/python_init_repositories.bzl: +tf_staging/third_party/py/python_init_rules.bzl: +tf_staging/third_party/py/python_init_toolchains.bzl: +tf_staging/third_party/py/python_repo.bzl: tf_staging/third_party/pybind11.BUILD: tf_staging/third_party/pybind11_bazel/BUILD: tf_staging/third_party/pybind11_protobuf/BUILD: diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD index 571881111137af..199cf34c4f27ab 100644 --- a/tensorflow/python/BUILD +++ b/tensorflow/python/BUILD @@ -134,6 +134,7 @@ py_strict_library( ":pywrap_tensorflow", ":pywrap_tfe", "//tensorflow/compiler/mlir/quantization/tensorflow/python:quantize_model", + "//tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python:pywrap_tensorflow_to_stablehlo", "//tensorflow/core:protos_all_py", "//tensorflow/dtensor/python:dtensor", "//tensorflow/python/autograph", @@ -285,6 +286,7 @@ py_strict_library( "//tensorflow/python/summary/writer", "//tensorflow/python/tools:module_util", "//tensorflow/python/tools/api/generator:create_python_api", + "//tensorflow/python/tpu:_pywrap_sparse_core_layout", "//tensorflow/python/tpu:datasets", "//tensorflow/python/tpu:functional", "//tensorflow/python/tpu:preempted_hook_py", @@ -767,6 +769,7 @@ pywrap_tensorflow_macro( "//tensorflow/cc/saved_model:metrics_impl", "//tensorflow/compiler/mlir/quantization/stablehlo/python:pywrap_quantization_lib_impl", "//tensorflow/compiler/mlir/quantization/tensorflow/python:quantize_model_cc_impl", + "//tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python:pywrap_tensorflow_to_stablehlo_lib_impl", "//tensorflow/compiler/mlir/tensorflow/c:mlir_c_api_registration", "//tensorflow/compiler/tf2tensorrt:op_converter_registry_impl", "//tensorflow/compiler/tf2xla:tf2xla_opset", @@ -799,6 +802,7 @@ pywrap_tensorflow_macro( "//tensorflow/core/platform:stacktrace_handler", "//tensorflow/core/profiler:profiler_impl", "//tensorflow/core/profiler/internal:print_model_analysis", + "//tensorflow/core/tpu/kernels:sparse_core_layout", "//tensorflow/core/util:determinism", "//tensorflow/distribute/experimental/rpc/kernels:rpc_ops", "//tensorflow/dtensor/cc:dtensor_device_cc", @@ -881,6 +885,7 @@ filegroup( "//tensorflow/compiler/jit:flags", # tfe "//tensorflow/compiler/jit:get_compiler_ir", # tfe "//tensorflow/compiler/mlir/quantization/tensorflow/python:quantize_model_cc_impl", # quantization + "//tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python:pywrap_tensorflow_to_stablehlo_lib_impl", # tensorflow_to_stablehlo "//tensorflow/compiler/tf2xla:tf2xla_opset", # pywrap_xla_ops "//tensorflow/core:framework_internal_impl", # op_def_registry "//tensorflow/core:lib_internal_impl", # device_lib @@ -920,6 +925,7 @@ filegroup( "//tensorflow/core/platform:statusor", # tfe "//tensorflow/core/profiler/internal:print_model_analysis", # tfprof "//tensorflow/core/profiler/rpc/client:profiler_client_impl", # profiler + "//tensorflow/core/tpu/kernels:sparse_core_layout", # sparse_core_layouts "//tensorflow/core/util:determinism", # determinism "//tensorflow/core/util:port", # util_port "//tensorflow/core/util/tensor_bundle", # checkpoint_reader diff --git a/tensorflow/python/checkpoint/functional_saver.py b/tensorflow/python/checkpoint/functional_saver.py index 48d8f043458ca4..995e850b82d53b 100644 --- 
a/tensorflow/python/checkpoint/functional_saver.py +++ b/tensorflow/python/checkpoint/functional_saver.py @@ -284,7 +284,7 @@ def __init__( if (checkpoint_key, slice_spec) in self._keys_to_restore_fn: raise ValueError( - "Recieved multiple tensors with the same checkpoint key and " + "Received multiple tensors with the same checkpoint key and " "slice spec. This is invalid because one will overwrite the " "other in the checkpoint. This indicates a bug in the " "Checkpoint key-generation.") diff --git a/tensorflow/python/compat/compat.py b/tensorflow/python/compat/compat.py index 21f795a3a55a41..0a9176e41d5a19 100644 --- a/tensorflow/python/compat/compat.py +++ b/tensorflow/python/compat/compat.py @@ -29,7 +29,7 @@ # This value changes every day with an automatic CL. It can be modified in code # via `forward_compatibility_horizon()` or with the environment variable # TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date. -_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2024, 5, 12) +_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2024, 5, 21) _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS" _FORWARD_COMPATIBILITY_DATE_NUMBER = None diff --git a/tensorflow/python/data/kernel_tests/BUILD b/tensorflow/python/data/kernel_tests/BUILD index dc0d785798622b..da147e0f39435e 100644 --- a/tensorflow/python/data/kernel_tests/BUILD +++ b/tensorflow/python/data/kernel_tests/BUILD @@ -1360,6 +1360,7 @@ tf_py_strict_test( deps = [ ":checkpoint_test_base", ":test_base", + "//tensorflow/python/data/experimental/ops:random_access", "//tensorflow/python/data/ops:dataset_ops", "//tensorflow/python/data/ops:options", "//tensorflow/python/framework:combinations", diff --git a/tensorflow/python/data/kernel_tests/unbatch_test.py b/tensorflow/python/data/kernel_tests/unbatch_test.py index d86075cd2b73bf..a65c6a2f2f393d 100644 --- a/tensorflow/python/data/kernel_tests/unbatch_test.py +++ b/tensorflow/python/data/kernel_tests/unbatch_test.py @@ -16,6 +16,7 @@ from absl.testing import parameterized import numpy as np +from tensorflow.python.data.experimental.ops import random_access from tensorflow.python.data.kernel_tests import checkpoint_test_base from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops @@ -262,5 +263,34 @@ def test(self, verify_fn, symbolic_checkpoint): num_outputs) +class UnbatchRandomAccessTest(test_base.DatasetTestBase, + parameterized.TestCase): + @combinations.generate(test_base.default_test_combinations()) + def test(self): + dataset = dataset_ops.Dataset.range(10) + dataset = dataset.batch(4, drop_remainder=True) + dataset = dataset.unbatch() + for i in range(8): + self.assertEqual(self.evaluate(random_access.at(dataset, i)), i) + + @combinations.generate(test_base.default_test_combinations()) + def testNotDropRemainder(self): + dataset = dataset_ops.Dataset.range(10) + dataset = dataset.batch(4, drop_remainder=False) + dataset = dataset.unbatch() + with self.assertRaises(errors.FailedPreconditionError): + self.evaluate(random_access.at(dataset, 0)) + + @combinations.generate( + combinations.times(test_base.default_test_combinations(), + combinations.combine(index=[-1, 100]))) + def testInvalidIndex(self, index): + dataset = dataset_ops.Dataset.range(10) + dataset = dataset.batch(4, drop_remainder=True) + dataset = dataset.unbatch() + with self.assertRaises(errors.OutOfRangeError): + self.evaluate(random_access.at(dataset, index=index)) + + if __name__ == "__main__": test.main() diff 
--git a/tensorflow/python/distribute/BUILD b/tensorflow/python/distribute/BUILD index 5534e7487ff827..0fc31860524967 100644 --- a/tensorflow/python/distribute/BUILD +++ b/tensorflow/python/distribute/BUILD @@ -658,6 +658,7 @@ cuda_py_strict_test( srcs_version = "PY3", tags = [ "multi_and_single_gpu", + "notap", # TODO(b/341375925): Re-enable this test when flakiness is fixed. ], xla_enabled = True, deps = [ diff --git a/tensorflow/python/distribute/distribute_utils.py b/tensorflow/python/distribute/distribute_utils.py index 73ef2fcf0f5454..fd577236210a90 100644 --- a/tensorflow/python/distribute/distribute_utils.py +++ b/tensorflow/python/distribute/distribute_utils.py @@ -260,7 +260,7 @@ def is_distributed_table(v): def _validate_colocate_extended(v, extended): variable_strategy = v._distribute_strategy # pylint: disable=protected-access - if variable_strategy.extended is not extended: + if not variable_strategy or variable_strategy.extended is not extended: raise ValueError( "`colocate_vars_with` must only be passed a variable created in this " "tf.distribute.Strategy.scope(), not %s created in scope: %s" % diff --git a/tensorflow/python/flags_pybind.pyi b/tensorflow/python/flags_pybind.pyi index c08b02ddcb9b5d..b34ed2f4b68c19 100644 --- a/tensorflow/python/flags_pybind.pyi +++ b/tensorflow/python/flags_pybind.pyi @@ -21,6 +21,7 @@ class Flag: class Flags: enable_aggressive_constant_replication: Flag enable_colocation_key_propagation_in_while_op_lowering: Flag + enable_function_pruning_before_inlining: Flag enable_nested_function_shape_inference: Flag enable_quantized_dtypes_training: Flag enable_tf2min_ici_weight: Flag diff --git a/tensorflow/python/ops/array_grad.py b/tensorflow/python/ops/array_grad.py index c5ba8157dc73e2..7f856d10b0c082 100644 --- a/tensorflow/python/ops/array_grad.py +++ b/tensorflow/python/ops/array_grad.py @@ -811,6 +811,27 @@ def _IdNGrad(_, *grad): @ops.RegisterGradient("Reshape") def _ReshapeGrad(op: ops.Operation, grad): + """Defines the gradient for `array_ops.reshape()`.""" + input_shape = op.inputs[0].shape + if input_shape.rank is not None and not input_shape.is_fully_defined(): + # If only one dimension is undefined, we can use a wildcard dimension in + # the argument to `reshape()` to avoid creating a data dependency via + # a dynamic `shape()` operation. + input_shape_as_list = input_shape.as_list() + undefined_dims = [] + has_zero_dim = False + for i, dim in enumerate(input_shape_as_list): + if dim is None: + undefined_dims.append(i) + elif dim == 0: + # When the tensor has zero elements, the wildcard dimension + # is underspecified, and `reshape()` will arbitrarily set the unknown + # to `1`, triggering shape errors downstream. 
+ has_zero_dim = True + if len(undefined_dims) == 1 and not has_zero_dim: + input_shape_as_list[undefined_dims[0]] = -1 + return [array_ops.reshape(_IndexedSlicesToTensorNoWarning(grad), + input_shape_as_list), None] return [ array_ops.reshape( _IndexedSlicesToTensorNoWarning(grad), array_ops.shape(op.inputs[0])), diff --git a/tensorflow/python/ops/array_grad_test.py b/tensorflow/python/ops/array_grad_test.py index 3dfe394a2c15e3..1f7c65e5b6fe70 100644 --- a/tensorflow/python/ops/array_grad_test.py +++ b/tensorflow/python/ops/array_grad_test.py @@ -147,6 +147,41 @@ def f(x): self._testGrad(f, x) + def test_reshape_simple(self): + x = constant_op.constant([1., 2., 3.], dtype=dtypes.float64) + y = constant_op.constant([3, 1], dtype=dtypes.int64) + + def f(x): + return array_ops.reshape(x, y) + + self._testGrad(f, x) + + def test_reshape_one_unknown_dim(self): + def f(x): + x_without_shape = array_ops.placeholder_with_default(x, shape=[None, 2]) + return array_ops.reshape(x_without_shape, [3, 2]) + + x = constant_op.constant([[1., 2.], [3., 4.], [5., 6.]], + dtype=dtypes.float64) + self._testGrad(f, x) + + def test_reshape_two_unknown_dims(self): + def f(x): + x_without_shape = array_ops.placeholder_with_default(x, + shape=[None, None]) + return array_ops.reshape(x_without_shape, [6]) + + x = constant_op.constant([[1., 2., 3.], [4., 5., 6.]], dtype=dtypes.float64) + self._testGrad(f, x) + + def test_reshape_one_unknown_dim_and_zero_elements(self): + def f(x): + x_without_shape = array_ops.placeholder_with_default(x, shape=[None, 0]) + return array_ops.reshape(x_without_shape, [0]) + + x = constant_op.constant([], shape=[3, 0], dtype=dtypes.float64) + self._testGrad(f, x) + if __name__ == "__main__": test.main() diff --git a/tensorflow/python/ops/numpy_ops/np_dtypes.py b/tensorflow/python/ops/numpy_ops/np_dtypes.py index 00e2cdfc22964b..ec3608df67cb7b 100644 --- a/tensorflow/python/ops/numpy_ops/np_dtypes.py +++ b/tensorflow/python/ops/numpy_ops/np_dtypes.py @@ -27,10 +27,6 @@ tf_export.tf_export('experimental.numpy.bool_', v1=[]).export_constant( __name__, 'bool_' ) -complex_ = np.complex_ -tf_export.tf_export('experimental.numpy.complex_', v1=[]).export_constant( - __name__, 'complex_' -) complex128 = np.complex128 tf_export.tf_export('experimental.numpy.complex128', v1=[]).export_constant( __name__, 'complex128' @@ -39,10 +35,6 @@ tf_export.tf_export('experimental.numpy.complex64', v1=[]).export_constant( __name__, 'complex64' ) -float_ = np.float_ -tf_export.tf_export('experimental.numpy.float_', v1=[]).export_constant( - __name__, 'float_' -) float16 = np.float16 tf_export.tf_export('experimental.numpy.float16', v1=[]).export_constant( __name__, 'float16' @@ -107,6 +99,20 @@ tf_export.tf_export('experimental.numpy.unicode_', v1=[]).export_constant( __name__, 'unicode_' ) +if int(np.__version__.split('.')[0]) < 2: + complex_ = np.complex_ + float_ = np.float_ +else: + # Aliases np.complex_ and np.float_ have been removed in Numpy 2.0. Use + # np.complex128 and np.float64 instead. 
+ complex_ = np.complex128 + float_ = np.float64 +tf_export.tf_export('experimental.numpy.complex_', v1=[]).export_constant( + __name__, 'complex_' +) +tf_export.tf_export('experimental.numpy.float_', v1=[]).export_constant( + __name__, 'float_' +) iinfo = np.iinfo diff --git a/tensorflow/python/ops/numpy_ops/tests/np_test.py b/tensorflow/python/ops/numpy_ops/tests/np_test.py index d27c39a896dea7..fde60098841096 100644 --- a/tensorflow/python/ops/numpy_ops/tests/np_test.py +++ b/tensorflow/python/ops/numpy_ops/tests/np_test.py @@ -62,7 +62,7 @@ all_dtypes = number_dtypes + bool_dtypes -python_scalar_dtypes = [tnp.bool_, tnp.int_, tnp.float_, tnp.complex_] +python_scalar_dtypes = [tnp.bool_, tnp.int_, tnp.float64, tnp.complex128] # pylint: disable=unnecessary-lambda,g-long-lambda,expression-not-assigned def _valid_dtypes_for_shape(shape, dtypes): @@ -434,7 +434,7 @@ def wrapper(*args, **kw): if inexact and not any( tnp.issubdtype(tnp.result_type(x).as_numpy_dtype, tnp.inexact) for x in flat_args): - dtype = tnp.result_type(tnp.float_, *flat_args) + dtype = tnp.result_type(tnp.float64, *flat_args) else: dtype = tnp.result_type(*flat_args) dtype = dtype.as_numpy_dtype @@ -479,7 +479,7 @@ class LaxBackedNumpyTests(jtu.TestCase): def _GetArgsMaker(self, rng, shapes, dtypes, onp_arrays=True): def f(): - out = [rng(shape, dtype or tnp.float_) + out = [rng(shape, dtype or tnp.float64) for shape, dtype in zip(shapes, dtypes)] return out if onp_arrays else [tnp.asarray(a) for a in out] return f @@ -1775,14 +1775,14 @@ def testAverage(self, shape, dtype, axis, weights_shape, returned, rng_factory): "arg": arg, "ndmin": ndmin, "dtype": dtype} for i, (arg, dtype) in enumerate([ ([True, False, True], tnp.bool_), - (3., tnp.float_), + (3., tnp.float64), ([1, 2, 3], tnp.int_), - ([1., 2., 3.], tnp.float_), + ([1., 2., 3.], tnp.float64), ([[1, 2], [3, 4], [5, 6]], tnp.int_), - ([[1, 2.], [3, 4], [5, 6]], tnp.float_), - ([[1., 2j], [3., 4.], [5., 6.]], tnp.complex_), - ([[3, onp.array(2, dtype=tnp.float_), 1], - onp.arange(3., dtype=tnp.float_)], tnp.float_), # pylint: disable=bad-continuation + ([[1, 2.], [3, 4], [5, 6]], tnp.float64), + ([[1., 2j], [3., 4.], [5., 6.]], tnp.complex128), + ([[3, onp.array(2, dtype=tnp.float64), 1], + onp.arange(3., dtype=tnp.float64)], tnp.float64), # pylint: disable=bad-continuation ]) for ndmin in [None, onp.ndim(arg), onp.ndim(arg) + 1, onp.ndim(arg) + 2])) def testArray(self, arg, ndmin, dtype): @@ -2055,14 +2055,14 @@ def testAstype(self): def testOnpMean(self): # from https://github.com/google/jax/issues/125 - x = tnp.add(tnp.eye(3, dtype=tnp.float_), 0.) + x = tnp.add(tnp.eye(3, dtype=tnp.float64), 0.) 
ans = onp.mean(x) self.assertAllClose(ans, onp.array(1./3), check_dtypes=False) @jtu.disable def testArangeOnFloats(self): # from https://github.com/google/jax/issues/145 - expected = onp.arange(0.0, 1.0, 0.1, dtype=tnp.float_) + expected = onp.arange(0.0, 1.0, 0.1, dtype=tnp.float64) ans = tnp.arange(0.0, 1.0, 0.1) self.assertAllClose(expected, ans, check_dtypes=True) @@ -2431,8 +2431,8 @@ def testIssue453(self): @named_parameters(jtu.cases_from_list( {"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__), "pytype": pytype, "dtype": dtype, "op": op} - for pytype, dtype in [(int, tnp.int_), (float, tnp.float_), - (bool, tnp.bool_), (complex, tnp.complex_)] + for pytype, dtype in [(int, tnp.int_), (float, tnp.float64), + (bool, tnp.bool_), (complex, tnp.complex128)] for op in ["atleast_1d", "atleast_2d", "atleast_3d"])) def testAtLeastNdLiterals(self, pytype, dtype, op): # Fixes: https://github.com/google/jax/issues/634 @@ -2470,7 +2470,7 @@ def testArange(self): onp.arange(2, 13, dtype=int), check_dtypes=True) self.assertAllClose(tnp.arange(0, 1, -0.5), - onp.arange(0, 1, -0.5, dtype=tnp.float_), + onp.arange(0, 1, -0.5, dtype=tnp.float64), check_dtypes=True) self.assertRaises(TypeError, lambda: tnp.arange()) diff --git a/tensorflow/python/ops/numpy_ops/tests/test_util.py b/tensorflow/python/ops/numpy_ops/tests/test_util.py index cf178a3f5dfbcc..086b182bf2a628 100644 --- a/tensorflow/python/ops/numpy_ops/tests/test_util.py +++ b/tensorflow/python/ops/numpy_ops/tests/test_util.py @@ -76,8 +76,8 @@ python_scalar_dtypes = { bool: onp.dtype(onp.bool_), int: onp.dtype(onp.int_), - float: onp.dtype(onp.float_), - complex: onp.dtype(onp.complex_), + float: onp.dtype(onp.float64), + complex: onp.dtype(onp.complex128), } diff --git a/tensorflow/python/ops/string_ops.py b/tensorflow/python/ops/string_ops.py index 2e3bb68e0c20f2..b6c0e46c5f1028 100644 --- a/tensorflow/python/ops/string_ops.py +++ b/tensorflow/python/ops/string_ops.py @@ -478,7 +478,7 @@ def string_to_number(input, out_type=dtypes.float32, name=None): Args: input: A `Tensor` of type `string`. out_type: An optional `tf.DType` from: `tf.float32, tf.float64, tf.int32, - tf.int64`. Defaults to `tf.float32`. + tf.int64, tf.uint32, tf.uint64`. Defaults to `tf.float32`. The numeric type to interpret each string in `string_tensor` as. name: A name for the operation (optional). 
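Editorial example for the `string_ops.py` docstring change just above, which documents that `out_type` now also accepts unsigned integer types. A small sketch of the corresponding public API, assuming the op-level support the updated docstring describes:

```
import tensorflow as tf

values = tf.constant(["1", "42", "65535"])

# tf.strings.to_number is the public wrapper around string_to_number; per the
# updated docstring, uint32/uint64 are accepted in addition to
# float32/float64/int32/int64.
as_uint32 = tf.strings.to_number(values, out_type=tf.uint32)
as_uint64 = tf.strings.to_number(values, out_type=tf.uint64)
```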
diff --git a/tensorflow/python/platform/BUILD b/tensorflow/python/platform/BUILD index df26d8b84ec5a3..0ca7e7bfae738f 100644 --- a/tensorflow/python/platform/BUILD +++ b/tensorflow/python/platform/BUILD @@ -15,6 +15,7 @@ visibility = [ # copybara:uncomment "//learning/brain/contrib/eager/numlib/benchmarks/kumamon:__subpackages__", # copybara:uncomment "//learning/brain/mobile/lite/tooling/model_analyzer:__subpackages__", # copybara:uncomment "//tensorflow_serving/model_servers:__subpackages__", + # copybara:uncomment "//third_party/odml/model_customization/quantization:__subpackages__", ] package( diff --git a/tensorflow/python/tpu/BUILD b/tensorflow/python/tpu/BUILD index 108e643b1e4ea9..c9c2b7f75a0710 100644 --- a/tensorflow/python/tpu/BUILD +++ b/tensorflow/python/tpu/BUILD @@ -1015,6 +1015,26 @@ tf_proto_library( # ) # copybara:uncomment_end +tf_python_pybind_extension( + name = "_pywrap_sparse_core_layout", + srcs = ["pywrap_sparse_core_layout.cc"], + enable_stub_generation = True, + features = ["-layering_check"], + pytype_srcs = [ + "_pywrap_sparse_core_layout.pyi", + ], + deps = [ + "//tensorflow/core/tpu/kernels:_pywrap_sparse_core_layout_header_only", + "//tensorflow/python/lib/core:pybind11_lib", + "//tensorflow/python/lib/core:pybind11_status", + "//tensorflow/python/lib/core:pybind11_status_headers", + "//third_party/python_runtime:headers", # buildcleaner: keep + "@pybind11", + "@pybind11_abseil//pybind11_abseil:status_casters", + "@pybind11_protobuf//pybind11_protobuf:native_proto_caster", + ], +) + tf_python_pybind_extension( name = "_pywrap_tpu_embedding", srcs = ["pywrap_tpu_embedding.cc"], @@ -1039,6 +1059,7 @@ py_strict_library( name = "tpu_embedding_v3", srcs = ["tpu_embedding_v3.py"], deps = [ + ":_pywrap_sparse_core_layout", ":_pywrap_tpu_embedding", ":tpu_embedding_base", ":tpu_embedding_v2_utils", diff --git a/tensorflow/python/tpu/_pywrap_sparse_core_layout.pyi b/tensorflow/python/tpu/_pywrap_sparse_core_layout.pyi new file mode 100644 index 00000000000000..778093406836bc --- /dev/null +++ b/tensorflow/python/tpu/_pywrap_sparse_core_layout.pyi @@ -0,0 +1,24 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import Any + +class SparseCoreLayoutStacker: + def __init__(self, num_partitions: int, sparse_cores_per_partition: int) -> None: ... + def AddTable(self, table_name: str, table_height: int, table_width: int, group: str, output_samples: int) -> None: ... + def GetLayouts(self, *args, **kwargs) -> Any: ... + def SetActivationMemoryBytesLimit(self, arg0: int) -> None: ... + def SetStackingEnabled(self, arg0: bool) -> None: ... + def SetVariableShardBytesLimit(self, arg0: int) -> None: ... 
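The new `.pyi` stub above describes the Python surface of the sparse-core layout stacker. A minimal usage sketch, assuming the extension is importable as `tensorflow.python.tpu._pywrap_sparse_core_layout` (as its BUILD location suggests); the partition counts, table name, sizes, and group below are hypothetical values, not taken from the patch:

```python
from tensorflow.python.tpu import _pywrap_sparse_core_layout

# Hypothetical topology and table parameters for illustration only.
stacker = _pywrap_sparse_core_layout.SparseCoreLayoutStacker(
    num_partitions=4, sparse_cores_per_partition=4)
stacker.SetStackingEnabled(True)
stacker.AddTable(
    table_name="video_table",
    table_height=1024,
    table_width=16,
    group="default",
    output_samples=128)
layouts = stacker.GetLayouts()  # Computed layouts, returned via the proto caster.
```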
diff --git a/tensorflow/python/tpu/pywrap_sparse_core_layout.cc b/tensorflow/python/tpu/pywrap_sparse_core_layout.cc new file mode 100644 index 00000000000000..785efe5d682a25 --- /dev/null +++ b/tensorflow/python/tpu/pywrap_sparse_core_layout.cc @@ -0,0 +1,43 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include + +#include "pybind11/cast.h" // from @pybind11 +#include "pybind11/detail/common.h" // from @pybind11 +#include "pybind11_abseil/status_casters.h" // from @pybind11_abseil +#include "pybind11_protobuf/native_proto_caster.h" // from @pybind11_protobuf +#include "tensorflow/core/tpu/kernels/sparse_core_layout.h" +#include "tensorflow/core/tpu/kernels/sparse_core_layout.pb.h" + +namespace tensorflow::tpu { + +namespace py = pybind11; + +PYBIND11_MODULE(_pywrap_sparse_core_layout, m) { + py::class_(m, "SparseCoreLayoutStacker") + .def(py::init(), py::arg("num_partitions"), + py::arg("sparse_cores_per_partition")) + .def("SetActivationMemoryBytesLimit", + &SparseCoreLayoutStacker::SetActivationMemoryBytesLimit) + .def("SetVariableShardBytesLimit", + &SparseCoreLayoutStacker::SetVariableShardBytesLimit) + .def("SetStackingEnabled", &SparseCoreLayoutStacker::SetStackingEnabled) + .def("AddTable", &SparseCoreLayoutStacker::AddTable, + py::arg("table_name"), py::arg("table_height"), + py::arg("table_width"), py::arg("group"), py::arg("output_samples")) + .def("GetLayouts", &SparseCoreLayoutStacker::GetLayouts); +} + +} // namespace tensorflow::tpu diff --git a/tensorflow/python/tpu/tpu_embedding_v3.py b/tensorflow/python/tpu/tpu_embedding_v3.py index c32823017f1685..c1eb333915a0d8 100644 --- a/tensorflow/python/tpu/tpu_embedding_v3.py +++ b/tensorflow/python/tpu/tpu_embedding_v3.py @@ -307,6 +307,20 @@ def read_from_device(self, device): ) +def _clone_feature_config(feature_config): + old_to_new_table = {} + new_features = [] + + for old_feature in nest.flatten(feature_config): + feature = copy.copy(old_feature) + if feature.table not in old_to_new_table: + old_to_new_table[feature.table] = copy.copy(feature.table) + feature.table = old_to_new_table[feature.table] + new_features.append(feature) + + return nest.pack_sequence_as(feature_config, new_features) + + def _stack_tables_with_same_table_dim_and_optimizer( table_config: Sequence[TableConfig], flat_features: Sequence[Tuple[Any, FeatureConfig]], @@ -496,7 +510,7 @@ def __init__( # We do a clone on the feature_config here as we will alter settings in it # and we don't want the user to see these. We can't just use clone here # as we need to maintain some object relationships. 
- super().__init__(self._clone_feature_config(feature_config), optimizer) + super().__init__(_clone_feature_config(feature_config), optimizer) self._strategy = distribute_lib.get_strategy() if not isinstance( self._strategy, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV2) @@ -621,19 +635,6 @@ def _update_sparse_core_buffer_size_after_table_stacking(self): ] ) - def _clone_feature_config(self, feature_config): - old_to_new_table = {} - new_features = [] - - for old_feature in nest.flatten(feature_config): - feature = copy.copy(old_feature) - if feature.table not in old_to_new_table: - old_to_new_table[feature.table] = copy.copy(feature.table) - feature.table = old_to_new_table[feature.table] - new_features.append(feature) - - return nest.pack_sequence_as(feature_config, new_features) - @property def embedding_tables( self, @@ -1094,16 +1095,16 @@ def compute_sparse_core_stats( sparse_core_embedding_config: Optional[SparseCoreEmbeddingConfig] = None, ) -> Tuple[Any, Any]: """Computes the max_ids/unique ids settings from the input features.""" - + copy_feature_config = _clone_feature_config(feature_config) table_config = [] - for feature in nest.flatten(feature_config): + for feature in nest.flatten(copy_feature_config): table_config.append(feature.table) for table in table_config: if table.optimizer is None: table.optimizer = optimizer - flat_features = nest.flatten_with_joined_string_paths(feature_config) + flat_features = nest.flatten_with_joined_string_paths(copy_feature_config) s = _stack_tables_with_same_table_dim_and_optimizer( table_config, @@ -1863,7 +1864,9 @@ def _stack_gradients(self, gradients): # this file is OSSed. def extract_variable_info( kwargs: Any, -) -> Tuple[str, Tuple[int, ...], dtypes.DType, Callable[[], Any]]: +) -> Tuple[ + str, Tuple[int, ...], dtypes.DType, Callable[[], Any], Optional[int] +]: """Extracts the variable creation attributes from the kwargs. Args: @@ -1871,8 +1874,13 @@ def extract_variable_info( scope. Returns: - A tuple of variable name, shape, dtype, initialization function. + A tuple of variable name, shape, dtype, initialization function, + restore_uid. """ + + def get_restore_uid(initial_value: Callable[..., Any]) -> int | None: + return getattr(initial_value, "restore_uid", None) + if isinstance(kwargs["initial_value"], functools.partial) and ( "shape" in kwargs["initial_value"].keywords or kwargs["initial_value"].args @@ -1887,6 +1895,7 @@ def extract_variable_info( shape, kwargs["initial_value"].keywords.get("dtype", kwargs["dtype"]), kwargs["initial_value"].func, + get_restore_uid(kwargs["initial_value"].func), ) elif ( "shape" not in kwargs @@ -1908,6 +1917,7 @@ def extract_variable_info( kwargs["shape"], kwargs["dtype"], kwargs["initial_value"], + get_restore_uid(kwargs["initial_value"]), ) @@ -1961,7 +1971,9 @@ def _create_sharded_variable(next_creator, *args, **kwargs): "shard_info must be in arguments of the init function." 
) - name, shape, dtype, unwrapped_initial_value = extract_variable_info(kwargs) + name, shape, dtype, unwrapped_initial_value, restore_uid = ( + extract_variable_info(kwargs) + ) shape = ops.tensor_shape.TensorShape(shape) num_devices = num_replicas * num_cores_per_replica @@ -2005,6 +2017,9 @@ def _create_sharded_variable(next_creator, *args, **kwargs): result = TPUEmbeddingShardedVariable( strategy, variables, tf_variables.VariableAggregation.NONE, None ) + if restore_uid is not None: + result._maybe_initialize_trackable() # pylint: disable=protected-access + result._update_uid = restore_uid # pylint: disable=protected-access return result return _create_sharded_variable diff --git a/tensorflow/python/tpu/tpu_embedding_v3_test.py b/tensorflow/python/tpu/tpu_embedding_v3_test.py index 75d9edff9bbdb7..433eac09c06759 100644 --- a/tensorflow/python/tpu/tpu_embedding_v3_test.py +++ b/tensorflow/python/tpu/tpu_embedding_v3_test.py @@ -684,6 +684,9 @@ def test_compute_sparse_core_stats_and_pass_it_to_api(self): optimizer=tpu_embedding_v2_utils.SGD(learning_rate=1.0), ) ) + self.assertEqual( + feature_config.table.dim, 127, 'Unexpected update to FeatureConfig' + ) sparse_core_embedding_config = tpu_embedding_v3.SparseCoreEmbeddingConfig( disable_table_stacking=False, @@ -708,7 +711,9 @@ def step(data): return strategy.run(step, args=(data,)) result = test_fn() - + self.assertEqual( + feature_config.table.dim, 127, 'Unexpected update to FeatureConfig' + ) mid_level_api_cpu = tpu_embedding_for_serving.TPUEmbeddingForServing( feature_config=feature_config, optimizer=tpu_embedding_v2_utils.SGD(learning_rate=1.0), diff --git a/tensorflow/tools/api/tests/convert_from_multiline.cc b/tensorflow/tools/api/tests/convert_from_multiline.cc index afde8f9f122f77..9ad5a12be7d378 100644 --- a/tensorflow/tools/api/tests/convert_from_multiline.cc +++ b/tensorflow/tools/api/tests/convert_from_multiline.cc @@ -48,7 +48,7 @@ Status ConvertFilesFromMultiline(const string& input_dir, TF_RETURN_IF_ERROR( tensorflow::WriteStringToFile(env, output_path, contents)); } - return OkStatus(); + return absl::OkStatus(); } } // namespace } // namespace tensorflow diff --git a/tensorflow/tools/benchmark/BUILD b/tensorflow/tools/benchmark/BUILD index dbac33541f0872..cdfb36c3a3eae2 100644 --- a/tensorflow/tools/benchmark/BUILD +++ b/tensorflow/tools/benchmark/BUILD @@ -40,7 +40,11 @@ cc_library( "//tensorflow/core:tensorflow", "//tensorflow/core:test", ], - }), + }) + [ + "//tensorflow/core/platform:numbers", + "//tensorflow/core/util:stats_calculator_portable", + "@local_tsl//tsl/platform:status", + ], ) tf_cc_test( @@ -50,9 +54,11 @@ tf_cc_test( deps = [ ":benchmark_model_lib", "//tensorflow/cc:cc_ops", + "//tensorflow/cc:scope", "//tensorflow/core:core_cpu", "//tensorflow/core:framework", "//tensorflow/core:lib", + "//tensorflow/core:protos_all_cc", "//tensorflow/core:test", "//tensorflow/core:test_main", "//tensorflow/core:testlib", diff --git a/tensorflow/tools/benchmark/benchmark_model.cc b/tensorflow/tools/benchmark/benchmark_model.cc index f0a97c2170a3fc..b687d725dccf06 100644 --- a/tensorflow/tools/benchmark/benchmark_model.cc +++ b/tensorflow/tools/benchmark/benchmark_model.cc @@ -26,25 +26,34 @@ limitations under the License. 
#include #include -#include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" +#include "tensorflow/core/framework/numeric_types.h" #include "tensorflow/core/framework/step_stats.pb.h" #include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/graph/algorithm.h" +#include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/framework/types.h" +#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/graph.h" -#include "tensorflow/core/lib/strings/numbers.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/platform/logging.h" -#include "tensorflow/core/platform/platform.h" +#include "tensorflow/core/platform/numbers.h" +#include "tensorflow/core/platform/status.h" +#include "tensorflow/core/platform/tstring.h" #include "tensorflow/core/platform/types.h" +#include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/public/session.h" +#include "tensorflow/core/public/session_options.h" #include "tensorflow/core/util/command_line_flags.h" #include "tensorflow/core/util/reporter.h" #include "tensorflow/core/util/stat_summarizer.h" +#include "tensorflow/core/util/stat_summarizer_options.h" +#include "tensorflow/core/util/stats_calculator.h" +#include "tsl/platform/errors.h" +#include "tsl/platform/status.h" namespace tensorflow { namespace benchmark_model { diff --git a/tensorflow/tools/benchmark/benchmark_model.h b/tensorflow/tools/benchmark/benchmark_model.h index 8211605ace6dea..e983ea4167d740 100644 --- a/tensorflow/tools/benchmark/benchmark_model.h +++ b/tensorflow/tools/benchmark/benchmark_model.h @@ -16,6 +16,10 @@ limitations under the License. #ifndef TENSORFLOW_TOOLS_BENCHMARK_BENCHMARK_MODEL_H_ #define TENSORFLOW_TOOLS_BENCHMARK_BENCHMARK_MODEL_H_ +#include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/platform/status.h" +#include "tensorflow/core/platform/types.h" #include "tensorflow/core/public/session.h" #include "tensorflow/core/util/stat_summarizer.h" diff --git a/tensorflow/tools/benchmark/benchmark_model_test.cc b/tensorflow/tools/benchmark/benchmark_model_test.cc index c2a953ec91f7a7..c6e42840c6a689 100644 --- a/tensorflow/tools/benchmark/benchmark_model_test.cc +++ b/tensorflow/tools/benchmark/benchmark_model_test.cc @@ -15,13 +15,21 @@ limitations under the License. 
#include "tensorflow/tools/benchmark/benchmark_model.h" -#include "tensorflow/cc/ops/standard_ops.h" +#include "tensorflow/cc/framework/scope.h" +#include "tensorflow/cc/ops/array_ops.h" +#include "tensorflow/cc/ops/math_ops.h" +#include "tensorflow/core/framework/graph.pb.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" -#include "tensorflow/core/graph/graph_def_builder.h" -#include "tensorflow/core/lib/core/status_test_util.h" -#include "tensorflow/core/lib/io/path.h" +#include "tensorflow/core/framework/types.pb.h" +#include "tensorflow/core/platform/env.h" +#include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/test.h" -#include "tensorflow/core/platform/test_benchmark.h" +#include "tensorflow/core/platform/types.h" +#include "tensorflow/core/public/session.h" +#include "tensorflow/core/util/stat_summarizer.h" +#include "tsl/lib/core/status_test_util.h" namespace tensorflow { namespace { diff --git a/tensorflow/tools/def_file_filter/symbols_pybind.txt b/tensorflow/tools/def_file_filter/symbols_pybind.txt index 78c42c6f454c5c..69aea896b57102 100644 --- a/tensorflow/tools/def_file_filter/symbols_pybind.txt +++ b/tensorflow/tools/def_file_filter/symbols_pybind.txt @@ -566,6 +566,10 @@ tensorflow::quantization::QuantizeStaticRangePtq tensorflow::quantization::QuantizeDynamicRangePtq tensorflow::quantization::QuantizeWeightOnly +[//tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python:pywrap_tensorflow_to_stablehlo_lib_impl] # tensorflow_to_stablehlo +mlir::tensorflow_to_stablehlo::pywrap::PywrapSavedModelToStablehlo +mlir::tensorflow_to_stablehlo::pywrap::PywrapTfModuleToStablehlo + [//tensorflow/dtensor/cc:dtensor_device_cc] # DTensor tensorflow::dtensor::AllocateDTensorDevice tensorflow::dtensor::AddMesh @@ -590,3 +594,9 @@ tensorflow::dtensor::SetIteratorElementLayouts tensorflow::dtensor::Mesh tensorflow::dtensor::Layout tensorflow::dtensor::Layout::LayoutType + +[//tensorflow/core/tpu/kernels:sparse_core_layout] #SparseCoreLayoutStacker +tensorflow::tpu::SparseCoreLayoutStacker::AddTable +tensorflow::tpu::SparseCoreLayoutStacker::SparseCoreLayoutStacker +tensorflow::tpu::SparseCoreLayoutStacker::AddTable +tensorflow::tpu::SparseCoreLayoutStacker::GetLayouts \ No newline at end of file diff --git a/tensorflow/workspace2.bzl b/tensorflow/workspace2.bzl index 77eea2ac869167..18e5e4a582e33b 100644 --- a/tensorflow/workspace2.bzl +++ b/tensorflow/workspace2.bzl @@ -205,9 +205,9 @@ def _tf_repositories(): tf_http_archive( name = "onednn", build_file = "//third_party/mkl_dnn:mkldnn_v1.BUILD", - sha256 = "e291fa4702f4bcfa6c8c23cb5b6599f0fefa8f23bc08edb9e15ddc5254ab7843", - strip_prefix = "oneDNN-3.3.4", - urls = tf_mirror_urls("https://github.com/oneapi-src/oneDNN/archive/refs/tags/v3.3.4.tar.gz"), + sha256 = "906559a25581b292352420721112e1656d21029b66e8597816f9e741fbcdeadb", + strip_prefix = "oneDNN-3.4.1", + urls = tf_mirror_urls("https://github.com/oneapi-src/oneDNN/archive/refs/tags/v3.4.1.tar.gz"), ) tf_http_archive( diff --git a/third_party/flatbuffers/workspace.bzl b/third_party/flatbuffers/workspace.bzl index 129880229e7dcb..b564d0331abe15 100644 --- a/third_party/flatbuffers/workspace.bzl +++ b/third_party/flatbuffers/workspace.bzl @@ -5,10 +5,10 @@ load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") # _FLATBUFFERS_GIT_COMMIT / _FLATBUFFERS_SHA256 were added due to an urgent change being made to # Flatbuffers that needed to 
be updated in order for Flatbuffers/TfLite be compatible with Android # API level >= 23. They can be removed next flatbuffers offical release / update. -_FLATBUFFERS_GIT_COMMIT = "595bf0007ab1929570c7671f091313c8fc20644e" +_FLATBUFFERS_GIT_COMMIT = "e6463926479bd6b330cbcf673f7e917803fd5831" # curl -L https://github.com/google/flatbuffers/archive/<_FLATBUFFERS_GIT_COMMIT>.tar.gz | shasum -a 256 -_FLATBUFFERS_SHA256 = "987300083ec1f1b095d5596ef8fb657ba46c45d786bc866a5e9029d7590a5e48" +_FLATBUFFERS_SHA256 = "c9c6b8653597ed7ee5c62243979010bd0f09b29a46be414505bc5b58a874bb17" def repo(): tf_http_archive( diff --git a/third_party/llvm/toolchains.patch b/third_party/llvm/toolchains.patch index a4de4eaaff343a..1cbcfe31d3d072 100644 --- a/third_party/llvm/toolchains.patch +++ b/third_party/llvm/toolchains.patch @@ -2,7 +2,7 @@ diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llv index c43ab727e285..7d848d2dffae 100644 --- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel -@@ -30,6 +30,30 @@ exports_files([ +@@ -30,6 +30,36 @@ exports_files([ "utils/lit/lit.py", ]) @@ -22,6 +22,12 @@ index c43ab727e285..7d848d2dffae 100644 + }, +) + ++py_library( ++ name = "lit_lib", ++ testonly = True, ++ srcs = ["utils/lit/lit.py"] + glob(["utils/lit/lit/**/*.py"]), ++) ++ +config_setting( + name = "macos_x86_64", + values = { diff --git a/third_party/llvm/workspace.bzl b/third_party/llvm/workspace.bzl index cd453a40ec3bc0..e20b531ed2fe3b 100644 --- a/third_party/llvm/workspace.bzl +++ b/third_party/llvm/workspace.bzl @@ -4,8 +4,8 @@ load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" - LLVM_COMMIT = "fc57f88f007497a4ead0ec8607ac66e1847b02d6" - LLVM_SHA256 = "0b66773795454d466ef4dcfae7cf38c8200ac4ee431e069ddf68313b3486b004" + LLVM_COMMIT = "1e5f29af81a5f6fda308074f6345b9fba4faa71c" + LLVM_SHA256 = "f31b9a08cb74cffd89a0bc87b6a7ef327b54bc67af25e1503683d40817b22f2a" tf_http_archive( name = name, diff --git a/third_party/mkl_dnn/mkldnn_v1.BUILD b/third_party/mkl_dnn/mkldnn_v1.BUILD index cc9e66d77e3a77..d36831ea4b9f53 100644 --- a/third_party/mkl_dnn/mkldnn_v1.BUILD +++ b/third_party/mkl_dnn/mkldnn_v1.BUILD @@ -94,8 +94,8 @@ expand_template( out = "include/oneapi/dnnl/dnnl_version.h", substitutions = { "@DNNL_VERSION_MAJOR@": "3", - "@DNNL_VERSION_MINOR@": "3", - "@DNNL_VERSION_PATCH@": "4", + "@DNNL_VERSION_MINOR@": "4", + "@DNNL_VERSION_PATCH@": "1", "@DNNL_VERSION_HASH@": "N/A", }, template = "include/oneapi/dnnl/dnnl_version.h.in", diff --git a/third_party/py/BUILD b/third_party/py/BUILD index e69de29bb2d1d6..84eba77ce1a7af 100644 --- a/third_party/py/BUILD +++ b/third_party/py/BUILD @@ -0,0 +1,40 @@ +load("@python//:defs.bzl", "compile_pip_requirements") +load("@python_version_repo//:py_version.bzl", "REQUIREMENTS") + +compile_pip_requirements( + name = "requirements", + extra_args = [ + "--allow-unsafe", + "--build-isolation", + ], + generate_hashes = True, + requirements_in = "requirements.in", + requirements_txt = REQUIREMENTS, +) + +compile_pip_requirements( + name = "requirements_nightly", + data = ["test-requirements.txt"], + extra_args = [ + "--allow-unsafe", + "--build-isolation", + "--extra-index-url=https://pypi.anaconda.org/scientific-python-nightly-wheels/simple", + "--pre", + "--upgrade", + ], + generate_hashes = False, + requirements_in = "requirements.in", + requirements_txt = REQUIREMENTS, +) + +compile_pip_requirements( + name = "requirements_dev", + extra_args = [ + 
"--allow-unsafe", + "--build-isolation", + "--upgrade", + ], + generate_hashes = False, + requirements_in = "requirements.in", + requirements_txt = REQUIREMENTS, +) diff --git a/third_party/py/non_hermetic/BUILD b/third_party/py/non_hermetic/BUILD deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/third_party/py/non_hermetic/BUILD.tpl b/third_party/py/non_hermetic/BUILD.tpl deleted file mode 100644 index 45480bd4a31cf8..00000000000000 --- a/third_party/py/non_hermetic/BUILD.tpl +++ /dev/null @@ -1,80 +0,0 @@ -licenses(["restricted"]) - -package(default_visibility = ["//visibility:public"]) - -# Point both runtimes to the same python binary to ensure we always -# use the python binary specified by ./configure.py script. -load("@bazel_tools//tools/python:toolchain.bzl", "py_runtime_pair") - -py_runtime( - name = "py2_runtime", - interpreter_path = "%{PYTHON_BIN_PATH}", - python_version = "PY2", -) - -py_runtime( - name = "py3_runtime", - interpreter_path = "%{PYTHON_BIN_PATH}", - python_version = "PY3", -) - -py_runtime_pair( - name = "py_runtime_pair", - py2_runtime = ":py2_runtime", - py3_runtime = ":py3_runtime", -) - -toolchain( - name = "py_toolchain", - toolchain = ":py_runtime_pair", - toolchain_type = "@bazel_tools//tools/python:toolchain_type", - target_compatible_with = [%{PLATFORM_CONSTRAINT}], - exec_compatible_with = [%{PLATFORM_CONSTRAINT}], -) - -# To build Python C/C++ extension on Windows, we need to link to python import library pythonXY.lib -# See https://docs.python.org/3/extending/windows.html -cc_import( - name = "python_lib", - interface_library = select({ - ":windows": ":python_import_lib", - # A placeholder for Unix platforms which makes --no_build happy. - "//conditions:default": "not-existing.lib", - }), - system_provided = 1, -) - -cc_library( - name = "python_headers", - hdrs = [":python_include"], - deps = select({ - ":windows": [":python_lib"], - "//conditions:default": [], - }), - includes = ["python_include"], -) - -# This alias is exists for the use of targets in the @llvm-project dependency, -# which expect a python_headers target called @python_runtime//:headers. We use -# a repo_mapping to alias python_runtime to this package, and an alias to create -# the correct target. -alias( - name = "headers", - actual = ":python_headers", -) - -cc_library( - name = "numpy_headers", - hdrs = [":numpy_include"], - includes = ["numpy_include"], -) - -config_setting( - name = "windows", - values = {"cpu": "x64_windows"}, - visibility = ["//visibility:public"], -) - -%{PYTHON_INCLUDE_GENRULE} -%{NUMPY_INCLUDE_GENRULE} -%{PYTHON_IMPORT_LIB_GENRULE} \ No newline at end of file diff --git a/third_party/py/non_hermetic/README b/third_party/py/non_hermetic/README deleted file mode 100644 index 62188a5817a09e..00000000000000 --- a/third_party/py/non_hermetic/README +++ /dev/null @@ -1,3 +0,0 @@ -This is a temporary copy of python_configure repository rule. It allows -XLA and TSL to keep non-hermetic Python while TF uses hermetic Python. -DO NOT DEPEND ON THIS COPY as it will be deleted soon. \ No newline at end of file diff --git a/third_party/py/non_hermetic/python_configure.bzl b/third_party/py/non_hermetic/python_configure.bzl deleted file mode 100644 index 89732c3e33d8ee..00000000000000 --- a/third_party/py/non_hermetic/python_configure.bzl +++ /dev/null @@ -1,312 +0,0 @@ -"""Repository rule for Python autoconfiguration. - -`python_configure` depends on the following environment variables: - - * `PYTHON_BIN_PATH`: location of python binary. 
- * `PYTHON_LIB_PATH`: Location of python libraries. -""" - -load( - "//third_party/remote_config:common.bzl", - "BAZEL_SH", - "PYTHON_BIN_PATH", - "PYTHON_LIB_PATH", - "TF_PYTHON_CONFIG_REPO", - "auto_config_fail", - "config_repo_label", - "execute", - "get_bash_bin", - "get_host_environ", - "get_python_bin", - "is_windows", - "raw_exec", - "read_dir", -) - -def _genrule(src_dir, genrule_name, command, outs): - """Returns a string with a genrule. - - Genrule executes the given command and produces the given outputs. - """ - return ( - "genrule(\n" + - ' name = "' + - genrule_name + '",\n' + - " outs = [\n" + - outs + - "\n ],\n" + - ' cmd = """\n' + - command + - '\n """,\n' + - ")\n" - ) - -def _norm_path(path): - """Returns a path with '/' and remove the trailing slash.""" - path = path.replace("\\", "/") - if path[-1] == "/": - path = path[:-1] - return path - -def _symlink_genrule_for_dir( - repository_ctx, - src_dir, - dest_dir, - genrule_name, - src_files = [], - dest_files = []): - """Returns a genrule to symlink(or copy if on Windows) a set of files. - - If src_dir is passed, files will be read from the given directory; otherwise - we assume files are in src_files and dest_files - """ - if src_dir != None: - src_dir = _norm_path(src_dir) - dest_dir = _norm_path(dest_dir) - files = "\n".join(read_dir(repository_ctx, src_dir)) - - # Create a list with the src_dir stripped to use for outputs. - dest_files = files.replace(src_dir, "").splitlines() - src_files = files.splitlines() - command = [] - outs = [] - for i in range(len(dest_files)): - if dest_files[i] != "": - # If we have only one file to link we do not want to use the dest_dir, as - # $(@D) will include the full path to the file. - dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i] - - # Copy the headers to create a sandboxable setup. - cmd = "cp -f" - command.append(cmd + ' "%s" "%s"' % (src_files[i], dest)) - outs.append(' "' + dest_dir + dest_files[i] + '",') - genrule = _genrule( - src_dir, - genrule_name, - " && ".join(command), - "\n".join(outs), - ) - return genrule - -def _get_python_lib(repository_ctx, python_bin): - """Gets the python lib path.""" - python_lib = get_host_environ(repository_ctx, PYTHON_LIB_PATH) - if python_lib != None: - return python_lib - - # The interesting program to execute. - print_lib = [ - "from __future__ import print_function", - "import site", - "import os", - "python_paths = []", - "if os.getenv('PYTHONPATH') is not None:", - " python_paths = os.getenv('PYTHONPATH').split(':')", - "try:", - " library_paths = site.getsitepackages()", - "except AttributeError:", - " from distutils.sysconfig import get_python_lib", - " library_paths = [get_python_lib()]", - "all_paths = set(python_paths + library_paths)", - "paths = []", - "for path in all_paths:", - " if os.path.isdir(path):", - " paths.append(path)", - "if len(paths) >=1:", - " print(paths[0])", - ] - - # The below script writes the above program to a file - # and executes it. This is to work around the limitation - # of not being able to upload files as part of execute. 
- cmd = "from os import linesep;" - cmd += "f = open('script.py', 'w');" - for line in print_lib: - cmd += "f.write(\"%s\" + linesep);" % line - cmd += "f.close();" - cmd += "from subprocess import call;" - cmd += "call([\"%s\", \"script.py\"]);" % python_bin - - result = execute(repository_ctx, [python_bin, "-c", cmd]) - return result.stdout.strip() - -def _check_python_lib(repository_ctx, python_lib): - """Checks the python lib path.""" - cmd = 'test -d "%s" -a -x "%s"' % (python_lib, python_lib) - result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd]) - if result.return_code == 1: - auto_config_fail("Invalid python library path: %s" % python_lib) - -def _check_python_bin(repository_ctx, python_bin): - """Checks the python bin path.""" - cmd = '[[ -x "%s" ]] && [[ ! -d "%s" ]]' % (python_bin, python_bin) - result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd]) - if result.return_code == 1: - auto_config_fail("--define %s='%s' is not executable. Is it the python binary?" % ( - PYTHON_BIN_PATH, - python_bin, - )) - -def _get_python_include(repository_ctx, python_bin): - """Gets the python include path.""" - result = execute( - repository_ctx, - [ - python_bin, - "-Wignore", - "-c", - "import sysconfig; " + - "print(sysconfig.get_path('include'))", - ], - error_msg = "Problem getting python include path.", - error_details = ("Is the Python binary path set up right? " + - "(See ./configure or " + PYTHON_BIN_PATH + ".) " + - "Is distutils installed?"), - ) - return result.stdout.splitlines()[0] - -def _get_python_import_lib_name(repository_ctx, python_bin): - """Get Python import library name (pythonXY.lib) on Windows.""" - result = execute( - repository_ctx, - [ - python_bin, - "-c", - "import sys;" + - 'print("python" + str(sys.version_info[0]) + ' + - ' str(sys.version_info[1]) + ".lib")', - ], - error_msg = "Problem getting python import library.", - error_details = ("Is the Python binary path set up right? " + - "(See ./configure or " + PYTHON_BIN_PATH + ".) "), - ) - return result.stdout.splitlines()[0] - -def _get_numpy_include(repository_ctx, python_bin): - """Gets the numpy include path.""" - return execute( - repository_ctx, - [ - python_bin, - "-c", - "from __future__ import print_function;" + - "import numpy;" + - " print(numpy.get_include());", - ], - error_msg = "Problem getting numpy include path.", - error_details = "Is numpy installed?", - ).stdout.splitlines()[0] - -def _create_local_python_repository(repository_ctx): - """Creates the repository containing files set up to build with Python.""" - - # Resolve all labels before doing any real work. Resolving causes the - # function to be restarted with all previous state being lost. This - # can easily lead to a O(n^2) runtime in the number of labels. 
- build_tpl = repository_ctx.path(Label("//third_party/py:BUILD.tpl")) - - python_bin = get_python_bin(repository_ctx) - _check_python_bin(repository_ctx, python_bin) - python_lib = _get_python_lib(repository_ctx, python_bin) - _check_python_lib(repository_ctx, python_lib) - python_include = _get_python_include(repository_ctx, python_bin) - numpy_include = _get_numpy_include(repository_ctx, python_bin) + "/numpy" - python_include_rule = _symlink_genrule_for_dir( - repository_ctx, - python_include, - "python_include", - "python_include", - ) - python_import_lib_genrule = "" - - # To build Python C/C++ extension on Windows, we need to link to python import library pythonXY.lib - # See https://docs.python.org/3/extending/windows.html - if is_windows(repository_ctx): - python_bin = python_bin.replace("\\", "/") - python_include = _norm_path(python_include) - python_import_lib_name = _get_python_import_lib_name(repository_ctx, python_bin) - python_import_lib_src = python_include.rsplit("/", 1)[0] + "/libs/" + python_import_lib_name - python_import_lib_genrule = _symlink_genrule_for_dir( - repository_ctx, - None, - "", - "python_import_lib", - [python_import_lib_src], - [python_import_lib_name], - ) - numpy_include_rule = _symlink_genrule_for_dir( - repository_ctx, - numpy_include, - "numpy_include/numpy", - "numpy_include", - ) - - platform_constraint = "" - if repository_ctx.attr.platform_constraint: - platform_constraint = "\"%s\"" % repository_ctx.attr.platform_constraint - repository_ctx.template("BUILD", build_tpl, { - "%{PYTHON_BIN_PATH}": python_bin, - "%{PYTHON_INCLUDE_GENRULE}": python_include_rule, - "%{PYTHON_IMPORT_LIB_GENRULE}": python_import_lib_genrule, - "%{NUMPY_INCLUDE_GENRULE}": numpy_include_rule, - "%{PLATFORM_CONSTRAINT}": platform_constraint, - }) - -def _create_remote_python_repository(repository_ctx, remote_config_repo): - """Creates pointers to a remotely configured repo set up to build with Python. - """ - repository_ctx.template("BUILD", config_repo_label(remote_config_repo, ":BUILD"), {}) - -def _python_autoconf_impl(repository_ctx): - """Implementation of the python_autoconf repository rule.""" - if get_host_environ(repository_ctx, TF_PYTHON_CONFIG_REPO) != None: - _create_remote_python_repository( - repository_ctx, - get_host_environ(repository_ctx, TF_PYTHON_CONFIG_REPO), - ) - else: - _create_local_python_repository(repository_ctx) - -_ENVIRONS = [ - BAZEL_SH, - PYTHON_BIN_PATH, - PYTHON_LIB_PATH, -] - -local_python_configure = repository_rule( - implementation = _create_local_python_repository, - environ = _ENVIRONS, - attrs = { - "environ": attr.string_dict(), - "platform_constraint": attr.string(), - }, -) - -remote_python_configure = repository_rule( - implementation = _create_local_python_repository, - environ = _ENVIRONS, - remotable = True, - attrs = { - "environ": attr.string_dict(), - "platform_constraint": attr.string(), - }, -) - -python_configure = repository_rule( - implementation = _python_autoconf_impl, - environ = _ENVIRONS + [TF_PYTHON_CONFIG_REPO], - attrs = { - "platform_constraint": attr.string(), - }, -) -"""Detects and configures the local Python. - -Add the following to your WORKSPACE FILE: - -```python -python_configure(name = "local_config_python") -``` - -Args: - name: A unique name for this workspace rule. 
-""" diff --git a/third_party/py/python_init_pip.bzl b/third_party/py/python_init_pip.bzl new file mode 100644 index 00000000000000..efc2bf8233cf61 --- /dev/null +++ b/third_party/py/python_init_pip.bzl @@ -0,0 +1,34 @@ +"""Hermetic Python initialization. Consult the WORKSPACE on how to use it.""" + +load("@python//:defs.bzl", "interpreter") +load("@python_version_repo//:py_version.bzl", "REQUIREMENTS") +load("@rules_python//python:pip.bzl", "package_annotation", "pip_parse") + +def python_init_pip(): + numpy_annotations = { + "numpy": package_annotation( + additive_build_content = """\ +cc_library( + name = "numpy_headers_2", + hdrs = glob(["site-packages/numpy/_core/include/**/*.h"]), + strip_include_prefix="site-packages/numpy/_core/include/", +) +cc_library( + name = "numpy_headers_1", + hdrs = glob(["site-packages/numpy/core/include/**/*.h"]), + strip_include_prefix="site-packages/numpy/core/include/", +) +cc_library( + name = "numpy_headers", + deps = [":numpy_headers_2", ":numpy_headers_1"], +) +""", + ), + } + + pip_parse( + name = "pypi", + annotations = numpy_annotations, + python_interpreter_target = interpreter, + requirements_lock = REQUIREMENTS, + ) diff --git a/third_party/py/python_init_repositories.bzl b/third_party/py/python_init_repositories.bzl new file mode 100644 index 00000000000000..5a405f2c2aba4c --- /dev/null +++ b/third_party/py/python_init_repositories.bzl @@ -0,0 +1,12 @@ +"""Hermetic Python initialization. Consult the WORKSPACE on how to use it.""" + +load("@rules_python//python:repositories.bzl", "py_repositories") +load("//third_party/py:python_repo.bzl", "python_repository") + +def python_init_repositories(requirements = {}): + python_repository( + name = "python_version_repo", + requirements_versions = requirements.keys(), + requirements_locks = requirements.values(), + ) + py_repositories() diff --git a/third_party/py/python_init_rules.bzl b/third_party/py/python_init_rules.bzl new file mode 100644 index 00000000000000..98a7b8bc3c315a --- /dev/null +++ b/third_party/py/python_init_rules.bzl @@ -0,0 +1,11 @@ +"""Hermetic Python initialization. Consult the WORKSPACE on how to use it.""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def python_init_rules(): + http_archive( + name = "rules_python", + sha256 = "9d04041ac92a0985e344235f5d946f71ac543f1b1565f2cdbc9a2aaee8adf55b", + strip_prefix = "rules_python-0.26.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.26.0/rules_python-0.26.0.tar.gz", + ) diff --git a/third_party/py/python_init_toolchains.bzl b/third_party/py/python_init_toolchains.bzl new file mode 100644 index 00000000000000..c1f800db4c01e7 --- /dev/null +++ b/third_party/py/python_init_toolchains.bzl @@ -0,0 +1,13 @@ +"""Hermetic Python initialization. Consult the WORKSPACE on how to use it.""" + +load("@python_version_repo//:py_version.bzl", "HERMETIC_PYTHON_VERSION") +load("@rules_python//python:repositories.bzl", "python_register_toolchains") +load("@rules_python//python:versions.bzl", "MINOR_MAPPING") + +def python_init_toolchains(): + if HERMETIC_PYTHON_VERSION in MINOR_MAPPING: + python_register_toolchains( + name = "python", + ignore_root_user_error = True, + python_version = HERMETIC_PYTHON_VERSION, + ) diff --git a/third_party/py/python_repo.bzl b/third_party/py/python_repo.bzl new file mode 100644 index 00000000000000..77a6ce9ce50b60 --- /dev/null +++ b/third_party/py/python_repo.bzl @@ -0,0 +1,206 @@ +""" +Repository rule to manage hermetic Python interpreter under Bazel. 
+ +Version can be set via build parameter "--repo_env=HERMETIC_PYTHON_VERSION=3.11" +Defaults to 3.11. + +To set wheel name, add "--repo_env=WHEEL_NAME=tensorflow_cpu" +""" + +VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] +DEFAULT_VERSION = "3.11" +WARNING = """ +HERMETIC_PYTHON_VERSION variable was not set correctly, using default version. +Python {} will be used. +To select Python version, either set HERMETIC_PYTHON_VERSION env variable in +your shell: + export HERMETIC_PYTHON_VERSION=3.12 +OR pass it as an argument to bazel command directly or inside your .bazelrc +file: + --repo_env=HERMETIC_PYTHON_VERSION=3.12 +""".format(DEFAULT_VERSION) + +content = """TF_PYTHON_VERSION = "{version}" +HERMETIC_PYTHON_VERSION = "{version}" +WHEEL_NAME = "{wheel_name}" +WHEEL_COLLAB = "{wheel_collab}" +REQUIREMENTS = "{requirements}" +""" + +def _python_repository_impl(ctx): + ctx.file("BUILD", "") + version_legacy = ctx.os.environ.get("TF_PYTHON_VERSION", "") + version = ctx.os.environ.get("HERMETIC_PYTHON_VERSION", "") + if not version: + version = version_legacy + else: + version_legacy = version + + wheel_name = ctx.os.environ.get("WHEEL_NAME", "tensorflow") + wheel_collab = ctx.os.environ.get("WHEEL_COLLAB", False) + if version not in VERSIONS: + print(WARNING) # buildifier: disable=print + version = DEFAULT_VERSION + else: + print("Using hermetic Python %s" % version) # buildifier: disable=print + + requirements = "" + for i in range(0, len(ctx.attr.requirements_locks)): + if ctx.attr.requirements_versions[i] == version: + requirements = ctx.attr.requirements_locks[i] + break + + ctx.file( + "py_version.bzl", + content.format( + version = version, + wheel_name = wheel_name, + wheel_collab = wheel_collab, + requirements = str(requirements), + ), + ) + +python_repository = repository_rule( + implementation = _python_repository_impl, + attrs = { + "requirements_versions": attr.string_list( + mandatory = False, + default = [], + ), + "requirements_locks": attr.label_list( + mandatory = False, + default = [], + ), + }, + environ = [ + "TF_PYTHON_VERSION", + "HERMETIC_PYTHON_VERSION", + "WHEEL_NAME", + "WHEEL_COLLAB", + ], +) + +def _custom_python_interpreter_impl(ctx): + version = ctx.attr.version + strip_prefix = ctx.attr.strip_prefix.format(version = version) + urls = [url.format(version = version) for url in ctx.attr.urls] + binary_name = ctx.attr.binary_name + if not binary_name: + ver_chunks = version.split(".") + binary_name = "python%s.%s" % (ver_chunks[0], ver_chunks[1]) + + install_dir = "{name}-{version}".format(name = ctx.attr.name, version = version) + _exec_and_check(ctx, ["mkdir", install_dir]) + install_path = ctx.path(install_dir) + srcs_dir = "srcs" + ctx.download_and_extract( + url = urls, + stripPrefix = strip_prefix, + output = srcs_dir, + ) + + configure_params = [] + if "CC" in ctx.os.environ: + configure_params.append("CC={}".format(ctx.os.environ["CC"])) + if "CXX" in ctx.os.environ: + configure_params.append("CXX={}".format(ctx.os.environ["CXX"])) + + configure_params.append("--enable-optimizations") + configure_params.append("--prefix=%s" % install_path.realpath) + _exec_and_check( + ctx, + ["./configure"] + configure_params, + working_directory = srcs_dir, + quiet = False, + ) + res = _exec_and_check(ctx, ["nproc"]) + cores = 12 if res.return_code != 0 else max(1, int(res.stdout.strip()) - 1) + _exec_and_check(ctx, ["make", "-j%s" % cores], working_directory = srcs_dir) + _exec_and_check(ctx, ["make", "altinstall"], working_directory = srcs_dir) + _exec_and_check(ctx, 
["ln", "-s", binary_name, "python3"], working_directory = install_dir + "/bin") + tar = "{install_dir}.tgz".format(install_dir = install_dir) + _exec_and_check(ctx, ["tar", "czpf", tar, install_dir]) + _exec_and_check(ctx, ["rm", "-rf", srcs_dir]) + res = _exec_and_check(ctx, ["sha256sum", tar]) + + sha256 = res.stdout.split(" ")[0].strip() + tar_path = ctx.path(tar) + + example = """\n\n +To use newly built Python interpreter add the following code snippet RIGHT AFTER +python_init_toolchains() in your WORKSPACE file. The code sample should work as +is but it may need some tuning, if you have special requirements. + +``` +load("@rules_python//python:repositories.bzl", "python_register_toolchains") +python_register_toolchains( + name = "python", + # By default assume the interpreter is on the local file system, replace + # with proper URL if it is not the case. + base_url = "file://", + ignore_root_user_error = True, + python_version = "{version}", + tool_versions = {{ + "{version}": {{ + # Path to .tar.gz with Python binary. By default it points to .tgz + # file in cache where it was built originally; replace with proper + # file location, if you moved it somewhere else. + "url": "{tar_path}", + "sha256": {{ + # By default we assume Linux x86_64 architecture, eplace with + # proper architecture if you were building on a different platform. + "x86_64-unknown-linux-gnu": "{sha256}", + }}, + "strip_prefix": "{install_dir}", + }}, + }}, +) +``` +\n\n""".format(version = version, tar_path = tar_path, sha256 = sha256, install_dir = install_dir) + + instructions = "INSTRUCTIONS-{version}.md".format(version = version) + ctx.file(instructions + ".tmpl", example, executable = False) + ctx.file( + "BUILD.bazel", + """ +genrule( + name = "{name}", + srcs = ["{tar}", "{instructions}.tmpl"], + outs = ["{install_dir}.tar.gz", "{instructions}"], + cmd = "cp $(location {tar}) $(location {install_dir}.tar.gz); cp $(location {instructions}.tmpl) $(location {instructions})", + visibility = ["//visibility:public"], +) + """.format( + name = ctx.attr.name, + tar = tar, + install_dir = install_dir, + instructions = instructions, + ), + executable = False, + ) + + print(example) # buildifier: disable=print + +custom_python_interpreter = repository_rule( + implementation = _custom_python_interpreter_impl, + attrs = { + "urls": attr.string_list(), + "strip_prefix": attr.string(), + "binary_name": attr.string(mandatory = False), + "version": attr.string(), + }, +) + +def _exec_and_check(ctx, command, fail_on_error = True, quiet = False, **kwargs): + res = ctx.execute(command, quiet = quiet, **kwargs) + if fail_on_error and res.return_code != 0: + fail(""" +Failed to execute command: `{command}` +Exit Code: {code} +STDERR: {stderr} + """.format( + command = command, + code = res.return_code, + stderr = res.stderr, + )) + return res diff --git a/third_party/stablehlo/temporary.patch b/third_party/stablehlo/temporary.patch index 2e395d99d26cf4..85a49110b59f95 100755 --- a/third_party/stablehlo/temporary.patch +++ b/third_party/stablehlo/temporary.patch @@ -164,81 +164,6 @@ diff --ruN a/stablehlo/CMakeLists.txt b/stablehlo/CMakeLists.txt #------------------------------------------------------------------------------- # Directory setup -diff --ruN a/stablehlo/docs/spec.md b/stablehlo/docs/spec.md ---- stablehlo/docs/spec.md -+++ stablehlo/docs/spec.md -@@ -2532,10 +2532,10 @@ - rhs_batching_dimensions, lhs_contracting_dimensions, - rhs_contracting_dimensions, precision_config), lhs, rhs, type(result))`. 
- --For hybrid quantized types, performs `hybrid_dequantize_then_op( -- lambda lhs, rhs: dot_general(lhs, rhs, lhs_batching_dimensions, -- rhs_batching_dimensions, lhs_contracting_dimensions, -- rhs_contracting_dimensions, precision_config), lhs, rhs)`. -+This only specifies semantics for per-tensor quantization. Per-axis quantization -+is work in progress ([#1574](https://github.com/openxla/stablehlo/issues/1574)). -+Also, in the future we may consider adding support for hybrid quantization -+ ([#1575](https://github.com/openxla/stablehlo/issues/1575)). - - `precision_config` controls the tradeoff between speed and accuracy for - computations on accelerator backends. This can be one of the following (at the -@@ -2552,21 +2552,21 @@ - - #### Inputs - --| Label | Name | Type | Constraints | --|-------|------------------------------|--------------------------------------------------------------|------------------------------------------------| --| (I1) | `lhs` | tensor or per-tensor quantized tensor | (C5-C6), (C9-C10), (C12-C14), (C17-C18), (C20) | --| (I2) | `rhs` | tensor or quantized tensor | (C7-C10), (C12-C20) | --| (I3) | `lhs_batching_dimensions` | 1-dimensional tensor constant of type `si64` | (C1), (C3), (C5), (C9), (C12) | --| (I4) | `rhs_batching_dimensions` | 1-dimensional tensor constant of type `si64` | (C1), (C4), (C7), (C9) | --| (I5) | `lhs_contracting_dimensions` | 1-dimensional tensor constant of type `si64` | (C2), (C3), (C6), (C10) | --| (I6) | `rhs_contracting_dimensions` | 1-dimensional tensor constant of type `si64` | (C2), (C4), (C8), (C10), (C16) | --| (I7) | `precision_config` | variadic number of enums of `DEFAULT`, `HIGH`, and `HIGHEST` | (C11) | -+| Label | Name | Type | Constraints | -+|-------|------------------------------|--------------------------------------------------------------|--------------------------------| -+| (I1) | `lhs` | tensor or per-tensor quantized tensor | (C5-C6), (C9-C10), (C12-C16) | -+| (I2) | `rhs` | tensor or quantized tensor | (C7-C10), (C12), (C18-C19) | -+| (I3) | `lhs_batching_dimensions` | 1-dimensional tensor constant of type `si64` | (C1), (C3), (C5), (C9), (C12) | -+| (I4) | `rhs_batching_dimensions` | 1-dimensional tensor constant of type `si64` | (C1), (C4), (C7), (C9) | -+| (I5) | `lhs_contracting_dimensions` | 1-dimensional tensor constant of type `si64` | (C2), (C3), (C6), (C10) | -+| (I6) | `rhs_contracting_dimensions` | 1-dimensional tensor constant of type `si64` | (C2), (C4), (C8), (C10), (C19) | -+| (I7) | `precision_config` | variadic number of enums of `DEFAULT`, `HIGH`, and `HIGHEST` | (C11) | - - #### Outputs - - | Name | Type | Constraints | - |----------|----------------------------|----------------------------| --| `result` | tensor or quantized tensor | (C12), (C14), (C18-C20) | -+| `result` | tensor or quantized tensor | (C12), (C14), (C16), (C18) | - - #### Constraints - -@@ -2589,17 +2589,14 @@ - * If the operation uses non-quantized tensors: - * (C13) `element_type(lhs) = element_type(rhs)`. - * If the operation uses quantized tensors: -- * (C14) `is_quantized(lhs) = is_quantized(result) and is_quantized(rhs)`. -- * (C15) `zero_points(rhs) = 0`. -- * (C16) If `is_per_axis_quantized(rhs)`, then -+ * (C14) `is_quantized(lhs) and is_quantized(rhs) and is_quantized(result)`. -+ * (C15) `storage_type(lhs) = storage_type(rhs)`. -+ * (C16) `expressed_type(lhs) = expressed_type(rhs) = expressed_type(result)`. -+ * (C17) `zero_points(rhs) = 0`. 
-+ * (C18) If `is_per_tensor_quantized(rhs)`, then -+ `is_per_tensor_quantized(result)`. -+ * (C19) If `is_per_axis_quantized(rhs)`, then - `quantization_dimension(rhs)` not in `rhs_contracting_dimensions`. -- * If `is_quantized(lhs)`: -- * (C17) `storage_type(lhs) = storage_type(rhs)`. -- * (C18) `expressed_type(lhs) = expressed_type(rhs) = expressed_type(result)`. -- * (C19) If `is_per_tensor_quantized(rhs)`, then -- `is_per_tensor_quantized(result)`. -- * If `!is_quantized(lhs)`: -- * (C20) `element_type(lhs) = expressed_type(rhs) = element_type(result)`. - - #### Examples - diff --ruN a/stablehlo/stablehlo/CMakeLists.txt b/stablehlo/stablehlo/CMakeLists.txt --- stablehlo/stablehlo/CMakeLists.txt +++ stablehlo/stablehlo/CMakeLists.txt @@ -250,273 +175,53 @@ diff --ruN a/stablehlo/stablehlo/CMakeLists.txt b/stablehlo/stablehlo/CMakeLists add_subdirectory(integrations) add_subdirectory(reference) add_subdirectory(tests) -diff --ruN a/stablehlo/stablehlo/conversions/linalg/tests/convolution.mlir b/stablehlo/stablehlo/conversions/linalg/tests/convolution.mlir ---- stablehlo/stablehlo/conversions/linalg/tests/convolution.mlir -+++ stablehlo/stablehlo/conversions/linalg/tests/convolution.mlir -@@ -356,7 +356,7 @@ - } - // CHECK-DAG: %[[CST:.+]] = arith.constant 0.000000e+00 : f32 - // CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[FILTER]] {{\[}}[0, 1, 2, 3]] : tensor<2x2x1x6xf32> into tensor<24xf32> --// CHECK: %[[EXPAND:.+]] = tensor.expand_shape %[[COLLAPSE]] {{\[}}[0, 1, 2, 3]] : tensor<24xf32> into tensor<2x2x2x3xf32> -+// CHECK: %[[EXPAND:.+]] = tensor.expand_shape %[[COLLAPSE]] {{\[}}[0, 1, 2, 3]] output_shape [2, 2, 2, 3] : tensor<24xf32> into tensor<2x2x2x3xf32> - // CHECK: %[[INIT:.+]] = tensor.empty() : tensor<2x3x4x2x3xf32> - // CHECK: %[[FILL:.+]] = linalg.fill ins(%[[CST]] : f32) outs(%[[INIT]] : tensor<2x3x4x2x3xf32>) -> tensor<2x3x4x2x3xf32> - // CHECK: %[[OUT:.+]] = linalg.depthwise_conv_2d_nhwc_hwcm -diff --ruN a/stablehlo/stablehlo/conversions/linalg/tests/miscellaneous.mlir b/stablehlo/stablehlo/conversions/linalg/tests/miscellaneous.mlir ---- stablehlo/stablehlo/conversions/linalg/tests/miscellaneous.mlir -+++ stablehlo/stablehlo/conversions/linalg/tests/miscellaneous.mlir -@@ -865,7 +865,7 @@ - %0 = "stablehlo.reshape"(%arg0) : (tensor) -> tensor<1xi32> - func.return %0 : tensor<1xi32> - } --// CHECK: tensor.expand_shape %{{.*}} [] : tensor into tensor<1xi32> -+// CHECK: tensor.expand_shape %{{.*}} [] output_shape [1] : tensor into tensor<1xi32> - - // ----- - -@@ -876,7 +876,7 @@ - func.return %0 : tensor<1xui32> - } - // CHECK: %[[ARG_SIGNLESS:.*]] = builtin.unrealized_conversion_cast %[[ARG_UNSIGNED]] : tensor to tensor --// CHECK: %[[RET_SIGNLESS:.*]] = tensor.expand_shape %[[ARG_SIGNLESS]] [] : tensor into tensor<1xi32> -+// CHECK: %[[RET_SIGNLESS:.*]] = tensor.expand_shape %[[ARG_SIGNLESS]] [] output_shape [1] : tensor into tensor<1xi32> - // CHECK: %[[RET_UNSIGNED:.*]] = builtin.unrealized_conversion_cast %[[RET_SIGNLESS]] : tensor<1xi32> to tensor<1xui32> - // CHECK: return %[[RET_UNSIGNED]] : tensor<1xui32> - -@@ -978,7 +978,7 @@ - } - // CHECK: %[[FLATTEN:.*]] = tensor.collapse_shape %{{.*}} {{\[}}[0, 1]] : tensor into tensor - // CHECK: %[[CAST:.*]] = tensor.cast %[[FLATTEN]] : tensor to tensor<40xf32> --// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1, 2]] : tensor<40xf32> into tensor<2x4x5xf32> -+// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1, 2]] output_shape [2, 4, 5] : tensor<40xf32> into tensor<2x4x5xf32> - - // ----- - -@@ -988,7 +988,7 @@ - 
func.return %0 : tensor<1x3xi32> - } - // CHECK: %[[CAST:.*]] = tensor.cast %{{.*}} : tensor to tensor<3xi32> --// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1]] : tensor<3xi32> into tensor<1x3xi32> -+// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1]] output_shape [1, 3] : tensor<3xi32> into tensor<1x3xi32> - - // ----- - -diff --ruN a/stablehlo/stablehlo/conversions/linalg/tests/random.mlir b/stablehlo/stablehlo/conversions/linalg/tests/random.mlir ---- stablehlo/stablehlo/conversions/linalg/tests/random.mlir -+++ stablehlo/stablehlo/conversions/linalg/tests/random.mlir -@@ -480,8 +480,8 @@ - // CHECK-DAG: %[[VAL_101:.*]] = arith.xori %[[VAL_100]], %[[VAL_87]] : i32 - - // CHECK: linalg.yield %[[YIELDED_1:.*]], %[[YIELDED_2:.*]] : i64, i64 --// CHECK-DAG: %[[VAL_206:.*]] = tensor.expand_shape %[[VAL_207:.*]]#0 {{\[\[}}0, 1]] : tensor<4xi64> into tensor<4x1xi64> --// CHECK-DAG: %[[VAL_208:.*]] = tensor.expand_shape %[[VAL_207]]#1 {{\[\[}}0, 1]] : tensor<4xi64> into tensor<4x1xi64> -+// CHECK-DAG: %[[VAL_206:.*]] = tensor.expand_shape %[[VAL_207:.*]]#0 {{\[\[}}0, 1]] -+// CHECK-DAG: %[[VAL_208:.*]] = tensor.expand_shape %[[VAL_207]]#1 {{\[\[}}0, 1]] - // CHECK-DAG: %[[VAL_209:.*]] = tensor.empty() : tensor<4x2xi64> - // CHECK-DAG: %[[VAL_213:.*]] = tensor.insert %[[VAL_30]] into %[[VAL_0]]{{\[}}%[[VAL_19]]] : tensor<2xi64> - -@@ -575,10 +575,10 @@ - // CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[CONCAT]] - - --// CHECK: %[[VAL_213:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] : tensor<80xi32> into tensor<80x1xi32> -+// CHECK: %[[VAL_213:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] - // CHECK: %[[VAL_214:.*]] = tensor.extract_slice %[[VAL_213]][0, 0] [77, 1] [1, 1] : tensor<80x1xi32> to tensor<77x1xi32> - // CHECK: %[[VAL_215:.*]] = tensor.collapse_shape %[[VAL_214]] {{\[\[}}0, 1]] : tensor<77x1xi32> into tensor<77xi32> --// CHECK: %[[VAL_216:.*]] = tensor.expand_shape %[[VAL_215]] {{\[\[}}0, 1]] : tensor<77xi32> into tensor<7x11xi32> -+// CHECK: %[[VAL_216:.*]] = tensor.expand_shape %[[VAL_215]] {{\[\[}}0, 1]] - // CHECK: %[[VAL_217:.*]] = tensor.insert %[[NEWSTATE]] into %[[ARG0]]{{\[}}%[[C1]]] : tensor<2xi64> - // CHECK: return %[[VAL_217]], %[[VAL_216]] : tensor<2xi64>, tensor<7x11xi32> - -@@ -616,10 +616,10 @@ - // CHECK-DAG: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[CONCAT]] {{\[\[}}0, 1]] : tensor<8x2xi64> into tensor<16xi64> - - --// CHECK-DAG: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] : tensor<16xi64> into tensor<16x1xi64> -+// CHECK-DAG: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] - // CHECK-DAG: %[[SLICE:.*]] = tensor.extract_slice %[[EXPANDED]][0, 0] [15, 1] [1, 1] : tensor<16x1xi64> to tensor<15x1xi64> - // CHECK-DAG: %[[EXPAND_2:.*]] = tensor.collapse_shape %[[SLICE]] {{\[\[}}0, 1]] : tensor<15x1xi64> into tensor<15xi64> --// CHECK-DAG: %[[RESHAPE:.*]] = tensor.expand_shape %[[EXPAND_2]] {{\[\[}}0, 1]] : tensor<15xi64> into tensor<3x5xi64> -+// CHECK-DAG: %[[RESHAPE:.*]] = tensor.expand_shape %[[EXPAND_2]] {{\[\[}}0, 1]] - // CHECK-DAG: %[[INSERTED:.+]] = tensor.insert %[[NEWSTATE]] into %[[ARG0]][%[[C1]]] : tensor<2xi64> - // CHECK: return %[[INSERTED]], %[[RESHAPE]] - diff --ruN a/stablehlo/stablehlo/conversions/tosa/tests/binary.mlir b/stablehlo/stablehlo/conversions/tosa/tests/binary.mlir --- stablehlo/stablehlo/conversions/tosa/tests/binary.mlir +++ stablehlo/stablehlo/conversions/tosa/tests/binary.mlir -@@ -45,14 +45,14 @@ - - // CHECK-LABEL: @divide - func.func @divide(%arg0 : 
tensor<10xi32>, %arg1 : tensor<10xi32>) -> tensor<10xi32> { -- // CHECK: tosa.div -+ // CHECK: tosa.int_div - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi32> - return %0 : tensor<10xi32> - } - - // CHECK-LABEL: @divide_f32 - func.func @divide_f32(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10xf32> { -- // tosa.div only supports i32, so this should not legalize. -+ // tosa.int_div only supports i32, so this should not legalize. - // CHECK: stablehlo.divide - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32> - return %0 : tensor<10xf32> -diff --ruN a/stablehlo/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll b/stablehlo/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll ---- stablehlo/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll -+++ stablehlo/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll -@@ -125,7 +125,7 @@ - Pattern => - replace op(input0 : Value<_: Tosa_Int32Tensor>, - input1 : Value<_: Tosa_Int32Tensor>) -- with op(input0, input1); -+ with op(input0, input1); - Pattern => - replace op(input0 : Value<_: Tosa_Tensor>, - input1 : Value<_: Tosa_Tensor>) -diff --ruN a/stablehlo/stablehlo/dialect/Base.cpp b/stablehlo/stablehlo/dialect/Base.cpp ---- stablehlo/stablehlo/dialect/Base.cpp -+++ stablehlo/stablehlo/dialect/Base.cpp -@@ -651,14 +651,14 @@ - - // quantized_type_c5 - auto maxPosFiniteNum = -- APFloat::getLargest(quantizedElementType.getExpressedType() -- .cast() -- .getFloatSemantics()) -+ APFloat::getLargest( -+ cast(quantizedElementType.getExpressedType()) -+ .getFloatSemantics()) - .convertToDouble(); - auto minPosFiniteNum = -- APFloat::getSmallest(quantizedElementType.getExpressedType() -- .cast() -- .getFloatSemantics()) -+ APFloat::getSmallest( -+ cast(quantizedElementType.getExpressedType()) -+ .getFloatSemantics()) - .convertToDouble(); - if (llvm::any_of(scales, [&](double scale) { - return scale < minPosFiniteNum || scale > maxPosFiniteNum; -diff --ruN a/stablehlo/stablehlo/dialect/TypeInference.cpp b/stablehlo/stablehlo/dialect/TypeInference.cpp ---- stablehlo/stablehlo/dialect/TypeInference.cpp -+++ stablehlo/stablehlo/dialect/TypeInference.cpp -@@ -171,7 +171,7 @@ - LogicalResult verifyConvolutionDotGeneralCommonQuantizationConstraints( - std::optional location, Type lhsElementType, Type rhsElementType, - Type resultElementType) { -- // convolution_c28 and dot_general_c14 -+ // convolution_c28 - if (!isa(rhsElementType) || - (isa(lhsElementType) != - isa(resultElementType))) { -@@ -184,19 +184,19 @@ - auto rhsQuantType = cast(rhsElementType); - if (auto lhsQuantType = dyn_cast(lhsElementType)) { - auto resultQuantType = cast(resultElementType); -- // convolution_c31 and dot_general_c17 -+ // convolution_c31 - if (lhsQuantType.getStorageType() != rhsQuantType.getStorageType()) { - return emitOptionalError( - location, "mismatched lhs and rhs quantization storage types"); - } -- // convolution_c32 and dot_general_c18 -+ // convolution_c32 - if (lhsQuantType.getExpressedType() != rhsQuantType.getExpressedType() || - lhsQuantType.getExpressedType() != resultQuantType.getExpressedType()) { - return emitOptionalError( - location, - "mismatched lhs, rhs and result quantization expressed types"); - } -- // convolution_c33 and dot_general_c19 -+ // convolution_c33 - if (isa(rhsQuantType) && - !isa(resultQuantType)) { - return emitOptionalError( -@@ -204,7 +204,7 @@ - } - } else { - Type rhsExpressedType = 
rhsQuantType.getExpressedType(); -- // convolution_c34 and dot_general_c20 -+ // convolution_c34 - if (lhsElementType != rhsExpressedType || - lhsElementType != resultElementType) { - return emitOptionalError(location, -@@ -3559,7 +3559,7 @@ - } - } - -- // convolution_c28, convolution_c31 - convolution_c34 -+ // convolution_c31 - convolution_c34 - return verifyConvolutionDotGeneralCommonQuantizationConstraints( - location, lhsElementType, rhsElementType, resultElementType); - } -@@ -3626,41 +3626,6 @@ - return success(); - } - --LogicalResult verifyDotGeneralOpQuantizationConstraints( -- std::optional location, Type lhsType, Type rhsType, -- Type resultType, ArrayRef rhsContractingDimensions) { -- Type lhsElementType = getElementTypeOrSelf(lhsType); -- Type rhsElementType = getElementTypeOrSelf(rhsType); -- Type resultElementType = getElementTypeOrSelf(resultType); -- -- // dot_general_c15 -- if (auto rhsPerTensorQuantType = -- dyn_cast(rhsElementType)) { -- if (rhsPerTensorQuantType.getZeroPoint() != 0) { -- return emitOptionalError(location, "Zero point of rhs should be 0"); -- } -- } else if (auto rhsPerAxisQuantType = -- dyn_cast(rhsElementType)) { -- if (llvm::any_of(rhsPerAxisQuantType.getZeroPoints(), -- [](int64_t zero_point) { return zero_point != 0; })) { -- return emitOptionalError(location, "Zero points of rhs should be 0"); -- } -- -- // dot_general_c16 -- if (llvm::is_contained(rhsContractingDimensions, -- rhsPerAxisQuantType.getQuantizedDimension())) { -- return emitOptionalError( -- location, -- "Quantization dimension of rhs should not be in the " -- "contracting dimension of rhs"); -- } -- } -- -- // dot_general_c14, dot_general_c17 - dot_general_c20 -- return verifyConvolutionDotGeneralCommonQuantizationConstraints( -- location, lhsElementType, rhsElementType, resultElementType); --} -- - LogicalResult verifyDotGeneralOp(std::optional location, Value lhs, - Value rhs, - ArrayRef lhsBatchingDimensions, -@@ -3683,13 +3648,6 @@ - return emitOptionalError( - location, "inferred shape '", dimSizesToString(inferredShape.getDims()), - "' ", "is incompatible with return type of operation ", resultType, ""); -- -- Type lhsType = lhs.getType(); -- Type rhsType = rhs.getType(); -- if (anyQuantized({lhsType, rhsType, resultType})) { -- return verifyDotGeneralOpQuantizationConstraints( -- location, lhsType, rhsType, resultType, rhsContractingDimensions); -- } - return success(); +@@ -155,7 +155,7 @@ + + // CHECK-LABEL: @maximum_f64 + func.func @maximum_f64(%arg0 : tensor<10xf64>, %arg1 : tensor<10xf64>) -> tensor<10xf64> { +- // CHECK: stablehlo.maximum ++ // CHECK: tosa.maximum + %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor<10xf64>, tensor<10xf64>) -> tensor<10xf64> + return %0 : tensor<10xf64> + } +diff --ruN a/stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir b/stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir +--- stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir ++++ stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir +@@ -9,8 +9,7 @@ + + // CHECK-LABEL: @constant_f64 + func.func @constant_f64() -> tensor<10xf64> { +- // TOSA does not support 64-bit types, so this should not legalize. 
+- // CHECK: stablehlo.constant ++ // CHECK: tosa.const + %0 = stablehlo.constant dense<0.000000e+00> : tensor<10xf64> + return %0 : tensor<10xf64> + } +diff --ruN a/stablehlo/stablehlo/dialect/AssemblyFormat.cpp b/stablehlo/stablehlo/dialect/AssemblyFormat.cpp +--- stablehlo/stablehlo/dialect/AssemblyFormat.cpp ++++ stablehlo/stablehlo/dialect/AssemblyFormat.cpp +@@ -305,8 +305,7 @@ + bool isCommutativeNoRegionMatchingDialect(OperationName innerOp, + StringRef reduceOpDialect) { + auto innerOpDialect = innerOp.getDialect(); +- return innerOpDialect && +- innerOpDialect->getNamespace().equals(reduceOpDialect) && ++ return innerOpDialect && innerOpDialect->getNamespace() == reduceOpDialect && + innerOp.hasTrait::Impl>() && + innerOp.hasTrait() && + (innerOp.hasTrait() || +@@ -359,7 +358,7 @@ + // Check E5. + LLVM_DEBUG(llvm::dbgs() << "Checking ReduceOp compact print E5\n"); + auto retOp = block.getTerminator(); +- if (!retOp->getName().stripDialect().equals("return")) return false; ++ if (retOp->getName().stripDialect() != "return") return false; + + return llvm::equal(innerOp.getResults(), retOp->getOperands()); } - -@@ -3861,8 +3819,8 @@ - if (SmallVector shape; operandType.hasStaticShape() && - matchInts(outputShape, shape).succeeded()) { - int64_t operandCount = operandType.getNumElements(); -- int64_t shapeCount = std::accumulate(shape.begin(), shape.end(), 1, -- std::multiplies()); -+ int64_t shapeCount = std::accumulate( -+ shape.begin(), shape.end(), int64_t{1}, std::multiplies()); - if (operandCount != shapeCount) { - return emitOptionalError(location, - "output_shape is incompatible with input type " diff --ruN a/stablehlo/stablehlo/experimental/BUILD.bazel b/stablehlo/stablehlo/experimental/BUILD.bazel --- stablehlo/stablehlo/experimental/BUILD.bazel +++ stablehlo/stablehlo/experimental/BUILD.bazel @@ -2906,23052 +2611,37 @@ diff --ruN a/stablehlo/stablehlo/experimental/transforms/StablehloRefineShapes.c +} // namespace experimental +} // namespace stablehlo +} // namespace mlir -diff --ruN a/stablehlo/stablehlo/integrations/python/tests/stablehlo.py b/stablehlo/stablehlo/integrations/python/tests/stablehlo.py ---- stablehlo/stablehlo/integrations/python/tests/stablehlo.py -+++ stablehlo/stablehlo/integrations/python/tests/stablehlo.py -@@ -241,18 +241,18 @@ - # Formatted as (tensor_type, np_value) - # Program runs arg + arg, which is used for expected value - tests = [ -- # No numpy types for f8 - skipping fp8 tests -- ("f16", np.asarray(1, np.float16)), -- ("f32", np.asarray(2, np.float32)), -- ("f64", np.asarray(3, np.double)), -- ("1xi8", np.asarray([4], np.int8)), -- ("1xi16", np.asarray([5], np.int16)), -- ("1xi32", np.asarray([-6], np.int32)), -- # Numpy's uint treated as int by DenseElementsAttr, skipping np.uint tests -- ("2x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2,2)), -- ("2x1x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2,1,2)), -- ("?x?xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2,2)), -- ("?x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2,2)), -+ # No numpy types for f8 - skipping fp8 tests -+ ("f16", np.asarray(1, np.float16)), -+ ("f32", np.asarray(2, np.float32)), -+ ("f64", np.asarray(3, np.double)), -+ ("1xi8", np.asarray([4], np.int8)), -+ ("1xi16", np.asarray([5], np.int16)), -+ ("1xi32", np.asarray([-6], np.int32)), -+ # Numpy's uint treated as int by DenseElementsAttr, skipping np.uint tests -+ ("2x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2, 2)), -+ ("2x1x2xf16", np.asarray([1, 2, 3, 4], 
np.float16).reshape(2, 1, 2)), -+ ("?x?xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2, 2)), -+ ("?x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2, 2)), - ] - for test in tests: - tensor_type, arg = test -diff --ruN a/stablehlo/stablehlo/tests/ops_stablehlo_quantized.mlir b/stablehlo/stablehlo/tests/ops_stablehlo_quantized.mlir ---- stablehlo/stablehlo/tests/ops_stablehlo_quantized.mlir -+++ stablehlo/stablehlo/tests/ops_stablehlo_quantized.mlir -@@ -1066,146 +1066,6 @@ - - // ----- - --func.func @dot_general_hybrid_quantized(%arg0: tensor<2x3x4xf32>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> { -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4xf32>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> -- func.return %0 : tensor<2x4x5xf32> --} -- --// ----- -- --func.func @dot_general_c14(%arg0: tensor<2x3x4xf32>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{rhs should be quantized for quantized operations and is_quantized(lhs)=is_quantized(result) should hold}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4xf32>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> -- func.return %0 : tensor<2x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c15_per_tensor(%arg0: tensor<2x3x4xf32>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> { -- // expected-error@+1 {{Zero point of rhs should be 0}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4xf32>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> -- func.return %0 : tensor<2x4x5xf32> --} -- --// ----- -- --func.func @dot_general_c15_per_axis( -- %arg0: tensor<2x3x4x!quant.uniform>, -- %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{Zero points of rhs should be 0}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4x!quant.uniform>, -- tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> -- func.return %0 : tensor<2x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c16( -- %arg0: tensor<2x3x4x!quant.uniform>, -- %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{Quantization dimension of rhs should not be in the contracting dimension of rhs}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [1], -- rhs_batching_dimensions = [1], -- lhs_contracting_dimensions = [0], -- rhs_contracting_dimensions = [0] -- > -- } : (tensor<2x3x4x!quant.uniform>, -- tensor<2x3x5x!quant.uniform>) -> tensor<3x4x5x!quant.uniform> -- func.return %0 : tensor<3x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c17(%arg0: 
tensor<2x3x4x!quant.uniform>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{mismatched lhs and rhs quantization storage types}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4x!quant.uniform>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> -- func.return %0 : tensor<2x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c18(%arg0: tensor<2x3x4x!quant.uniform>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{mismatched lhs, rhs and result quantization expressed types}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4x!quant.uniform>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> -- func.return %0 : tensor<2x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c19(%arg0: tensor<2x3x4x!quant.uniform>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{mismatched rhs and result quantization granularity}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4x!quant.uniform>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> -- func.return %0 : tensor<2x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c20(%arg0: tensor<2x3x4xf32>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> { -- // expected-error@+1 {{mismatched rhs quantization expressed type and lhs and result element type}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4xf32>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> -- func.return %0 : tensor<2x4x5xf32> --} -- --// ----- -- - func.func @quantized_element_type_c8(%arg0: tensor<1x2x!quant.uniform:f32, 1.0:300>>) { - // expected-error-re@+1 {{operand #0 must be ranked tensor of {{.*}} 4/8/16/32-bit uniform quantized signed integer or 4/8/16/32-bit uniform quantized unsigned integer or 4/8/16/32-bit uniform quantized per axis signed integer or 4/8/16/32-bit uniform quantized per axis unsigned integer values, but got 'tensor<1x2x!quant.uniform>'}} - %0 = stablehlo.add %arg0, %arg0 : tensor<1x2x!quant.uniform:f32, 1.0:300>> -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // 
CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
- - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : 
tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // 
CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, 
%arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : 
(!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: 
"vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ 
-1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> 
() -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: 
"vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { 
-@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2062,183 +2223,209 @@ - // ============ TYPES ============ - - // 
CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" 
-+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : 
tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // 
CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, 
%arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : 
(!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: 
"vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ 
-1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> 
() -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: 
"vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { 
-@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2062,190 +2223,217 @@ - // ============ TYPES ============ - - // 
CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: 
tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : 
tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, 
!stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, 
%[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: 
channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // 
CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - 
func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], 
%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: 
channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: 
tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> 
tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = 
#vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } -@@ -2062,190 +2223,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: 
"type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : 
tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, 
!stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, 
%[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: 
channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // 
CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - 
func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], 
%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: 
channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: 
tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> 
tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = 
#vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } -@@ -2062,190 +2223,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: 
"type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : 
tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, 
!stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, 
%[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: 
channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // 
CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - 
func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], 
%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: 
channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: 
tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> 
tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = 
#vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } -@@ -2062,190 +2223,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: 
"type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -448,8 +484,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -466,8 +503,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -486,8 +524,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -495,8 +534,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : 
tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -508,8 +548,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -537,8 +578,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -564,15 +606,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -593,8 +636,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -603,8 +647,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -612,8 +657,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, 
!stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -625,8 +671,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -638,8 +685,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -661,8 +709,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -684,8 +733,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -713,8 +763,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -742,8 +793,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -762,29 +814,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -800,8 +856,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -823,8 +880,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -842,22 +900,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, 
%[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -869,8 +930,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -882,8 +944,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -895,15 +958,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -913,8 +978,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -924,9 +990,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -935,22 +1002,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -960,22 +1030,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: 
channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -987,8 +1060,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1000,15 +1074,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1018,6 +1094,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1029,15 +1106,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1071,8 +1150,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1085,8 +1165,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // 
CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1096,8 +1177,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1128,15 +1210,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1156,8 +1240,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1167,8 +1252,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1182,8 +1268,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1217,8 +1304,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - 
func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1238,8 +1326,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1249,22 +1338,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1274,15 +1366,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], 
%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1292,22 +1386,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1319,8 +1416,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1333,16 +1431,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1364,8 +1463,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1375,8 +1475,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1386,11 +1487,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1401,15 +1503,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1432,36 +1536,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1479,57 +1588,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1539,8 +1656,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1554,36 +1672,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: 
channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1596,8 +1719,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1612,8 +1736,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1625,8 +1750,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1650,8 +1776,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1677,8 +1804,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1698,16 +1826,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1716,8 +1846,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1727,8 +1858,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1738,8 +1870,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1749,29 +1882,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1801,8 +1938,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: 
tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1832,15 +1970,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1853,8 +1993,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1864,43 +2005,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> 
tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1914,8 +2061,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1935,29 +2083,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1969,8 +2121,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = 
#vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1980,8 +2133,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1991,8 +2145,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2008,15 +2163,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2026,22 +2183,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2059,8 +2219,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } -@@ -2068,190 +2229,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: 
"type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_collective_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -431,8 +467,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -478,8 +516,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -498,8 +537,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -507,8 +547,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -520,8 +561,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -549,8 +591,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -576,15 +619,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -605,8 +649,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -615,8 +660,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -624,8 +670,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -637,8 +684,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -650,8 +698,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -673,8 +722,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -696,8 +746,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -725,8 +776,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // 
CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -754,8 +806,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -774,29 +827,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -812,8 +869,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -835,8 +893,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -854,22 +913,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: 
"op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -881,8 +943,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -894,8 +957,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -907,15 +971,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -925,8 +991,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -936,9 +1003,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -947,22 +1015,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -972,22 +1043,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // 
CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -999,8 +1073,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1012,15 +1087,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1030,6 +1107,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1041,15 +1119,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1083,8 +1163,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1097,8 +1178,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1108,8 +1190,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1140,15 +1223,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1168,8 +1253,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1179,8 +1265,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1194,8 +1281,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: 
tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1229,8 +1317,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1250,8 +1339,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1261,22 +1351,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = 
"stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1286,15 +1379,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1304,22 +1399,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1331,8 +1429,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1345,16 +1444,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: 
"op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1376,8 +1476,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1387,8 +1488,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1398,11 +1500,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1413,15 +1516,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1444,36 +1549,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1491,57 +1601,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1551,8 +1669,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1566,36 +1685,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1608,8 +1732,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1624,8 +1749,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1637,8 +1763,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1662,8 +1789,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1689,8 +1817,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1710,16 +1839,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : 
(!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1728,8 +1859,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1739,8 +1871,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1750,8 +1883,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1761,29 +1895,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: 
tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1813,8 +1951,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1844,15 +1983,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1865,8 +2006,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1876,43 +2018,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1926,8 +2074,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1947,29 +2096,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> 
tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1981,8 +2134,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1992,8 +2146,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -2003,8 +2158,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2020,15 +2176,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2038,22 +2196,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: 
"vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2071,8 +2232,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2080,190 +2242,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - 
func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> 
-+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
- - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_collective_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -431,8 +467,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -478,8 +516,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -498,8 +537,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -507,8 +547,9 @@ - } - - // CHECK-LABEL: 
"default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -520,8 +561,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -549,8 +591,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -576,15 +619,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -605,8 +649,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -615,8 +660,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], 
%[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -624,8 +670,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -637,8 +684,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -650,8 +698,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -673,8 +722,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -696,8 +746,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -725,8 +776,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -754,8 +806,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -774,29 +827,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -812,8 +869,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -836,7 +894,7 @@ - - // CHECK-LABEL: "op_all_reduce_with_promotable_types" - func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -854,8 +912,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -873,22 +932,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: 
"vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -900,8 +962,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -913,8 +976,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, 
!vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -926,15 +990,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -944,8 +1010,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -955,9 +1022,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -966,22 +1034,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -991,22 +1062,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -1018,8 +1092,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1031,15 +1106,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1049,6 +1126,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1060,15 +1138,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: 
batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1102,8 +1182,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1116,8 +1197,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1127,8 +1209,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1159,15 +1242,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1187,8 +1272,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1198,8 +1284,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // 
CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1213,8 +1300,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1248,8 +1336,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1269,8 +1358,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1280,22 +1370,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // 
CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1305,15 +1398,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1323,22 +1418,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1350,8 +1448,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1364,16 +1463,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: 
!vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1395,8 +1495,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1406,8 +1507,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1417,11 +1519,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1432,15 +1535,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1463,36 +1568,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 
= "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1510,57 +1620,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ 
// CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1570,8 +1688,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1585,36 +1704,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1627,8 +1751,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1643,8 +1768,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1658,7 +1784,7 @@ - // CHECK_lABEL: "op_reduce_with_promotable_types" - func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) - -> (tensor<4xf64>) { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> -@@ -1673,8 +1799,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1699,7 +1826,7 @@ - - // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" - func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> -@@ -1716,8 +1843,9 @@ - - - // CHECK-LABEL: 
"op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1742,11 +1870,11 @@ - func.return %0 : tensor<2x9x16x7xf32> - } - --// CHECK_lABEL: "op_reduce_window_with_promotable_types" -+// CHECK-LABEL: "op_reduce_window_with_promotable_types" - func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, - %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> - (tensor<2x2xf64>, tensor<2x2xf32>) { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) -@@ -1766,8 +1894,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1787,16 +1916,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1805,8 +1936,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1816,8 +1948,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1827,8 +1960,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1838,29 +1972,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1893,7 +2031,7 @@ - func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, - %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> - tensor<200x100x300xf64> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> -@@ -1916,8 +2054,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func 
@op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1947,8 +2086,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () -@@ -1970,15 +2110,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1991,8 +2133,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -2002,43 +2145,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: 
"vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -2052,8 +2201,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -2073,29 +2223,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor 
- } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -2107,8 +2261,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -2118,8 +2273,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -2129,8 +2285,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2146,15 +2303,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2164,22 +2323,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : 
tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2197,8 +2359,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2206,190 +2369,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, 
%[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: 
tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
- - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_collective_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -431,8 +467,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -478,8 +516,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -498,8 +537,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -507,8 +547,9 @@ - } - - // CHECK-LABEL: 
"default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -520,8 +561,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -549,8 +591,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -576,15 +619,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -605,8 +649,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -615,8 +660,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], 
%[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -624,8 +670,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -637,8 +684,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -650,8 +698,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -673,8 +722,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -696,8 +746,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -725,8 +776,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -754,8 +806,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -774,29 +827,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -812,8 +869,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -836,7 +894,7 @@ - - // CHECK-LABEL: "op_all_reduce_with_promotable_types" - func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -854,8 +912,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -873,22 +932,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: 
"vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -900,8 +962,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -913,8 +976,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, 
!vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -926,15 +990,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -944,8 +1010,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -955,9 +1022,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -966,22 +1034,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -991,22 +1062,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -1018,8 +1092,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1031,15 +1106,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1049,6 +1126,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1060,15 +1138,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: 
batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1102,8 +1182,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1116,8 +1197,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1127,8 +1209,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1159,15 +1242,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1187,8 +1272,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1198,8 +1284,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // 
CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1213,8 +1300,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1248,8 +1336,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1269,8 +1358,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1280,22 +1370,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // 
CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1305,15 +1398,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1323,22 +1418,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1350,8 +1448,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1364,16 +1463,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: 
!vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1395,8 +1495,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1406,8 +1507,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1417,11 +1519,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1432,15 +1535,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1463,36 +1568,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 
= "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1510,57 +1620,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ 
// CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1570,8 +1688,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1585,36 +1704,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1627,8 +1751,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1643,8 +1768,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1658,7 +1784,7 @@ - // CHECK_lABEL: "op_reduce_with_promotable_types" - func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) - -> (tensor<4xf64>) { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> -@@ -1673,8 +1799,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1699,7 +1826,7 @@ - - // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" - func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> -@@ -1716,8 +1843,9 @@ - - - // CHECK-LABEL: 
"op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1746,7 +1874,7 @@ - func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, - %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> - (tensor<2x2xf64>, tensor<2x2xf32>) { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) -@@ -1766,8 +1894,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1787,16 +1916,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1805,8 +1936,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1816,8 +1948,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: 
"vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1827,8 +1960,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1838,29 +1972,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1893,7 +2031,7 @@ - func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, - %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> - tensor<200x100x300xf64> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> -@@ -1916,8 +2054,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: 
"vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1947,8 +2086,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () -@@ -1970,15 +2110,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1991,8 +2133,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -2002,43 +2145,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], 
%[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -2052,8 +2201,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -2073,29 +2223,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: 
tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -2107,8 +2261,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -2118,8 +2273,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -2129,8 +2285,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2146,15 +2303,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2164,22 +2323,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2197,8 +2359,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2206,197 +2369,225 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, 
%arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_per_tensor_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_per_tensor_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_per_axis_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_per_axis_quantization(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg0) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG0]]) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> - %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform> - func.return %0 : tensor<2x!quant.uniform> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, 
%arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
- - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_collective_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -431,8 +467,9 @@ - } - - // CHECK-LABEL: "default_composite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_composite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.composite_v1"(%arg0) <{ -+ // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ - // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{}> - // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> - // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> -@@ -446,8 +483,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -475,8 +513,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -493,8 +532,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -513,8 +553,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> 
tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -522,8 +563,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -535,8 +577,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -564,8 +607,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -591,15 +635,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -620,8 +665,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = 
#vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -630,8 +676,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -639,8 +686,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -652,8 +700,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -665,8 +714,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -688,8 +738,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -711,8 +762,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -740,8 +792,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- 
// CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -769,8 +822,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -789,29 +843,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -827,8 +885,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -851,7 +910,7 @@ - - // CHECK-LABEL: "op_all_reduce_with_promotable_types" - func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -869,8 +928,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ 
// CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -888,22 +948,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -915,8 +978,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -928,8 +992,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, 
%arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -941,15 +1006,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -959,8 +1026,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -970,9 +1038,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -981,22 +1050,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -1006,22 
+1078,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -1033,8 +1108,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1046,15 +1122,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_composite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_composite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.composite_v1"(%arg0) <{ -+ // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ - // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{#vhlo.string_v1<"my_int"> = #vhlo.integer_v1<1 : i64>, #vhlo.string_v1<"my_string"> = #vhlo.string_v1<"foo">}> - // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> - // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> -@@ -1073,8 +1151,9 @@ - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) 
{ -@@ -1084,6 +1163,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1095,15 +1175,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1137,8 +1219,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1151,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1162,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1194,15 +1279,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1222,8 +1309,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1233,8 +1321,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1248,8 +1337,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1283,8 +1373,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1304,8 +1395,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1315,22 +1407,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1340,15 +1435,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1358,22 +1455,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1385,8 +1485,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1399,16 +1500,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1430,8 +1532,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1441,8 +1544,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1452,11 +1556,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1467,15 +1572,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: 
"vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1498,36 +1605,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1545,57 +1657,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1605,8 +1725,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1620,36 +1741,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: 
"vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1662,8 +1788,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1678,8 +1805,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1690,10 +1818,10 @@ - func.return %0 : tensor - } - --// CHECK_lABEL: "op_reduce_with_promotable_types" -+// CHECK-LABEL: "op_reduce_with_promotable_types" - func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) - -> (tensor<4xf64>) { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1<4x!vhlo.f64_v1> -@@ -1708,8 +1836,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1734,7 +1863,7 @@ - - // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" - func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> -@@ -1751,8 +1880,9 @@ - - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1781,7 +1911,7 @@ - func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, - %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> - (tensor<2x2xf64>, tensor<2x2xf32>) { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) -@@ -1801,8 +1931,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1822,16 +1953,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1840,8 +1973,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1851,8 +1985,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1862,8 +1997,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1873,29 +2009,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = 
#vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1928,7 +2068,7 @@ - func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, - %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> - tensor<200x100x300xf64> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> -@@ -1951,8 +2091,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1982,8 +2123,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () -@@ -2005,15 +2147,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -2026,8 +2170,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // 
CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -2037,43 +2182,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -2087,8 +2238,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -2108,29 +2260,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> 
!vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -2142,8 +2298,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -2153,8 +2310,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -2164,8 +2322,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2181,15 +2340,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2199,22 +2360,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2232,8 +2396,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2241,197 +2406,225 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - 
func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_per_tensor_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_per_tensor_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_per_axis_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_per_axis_quantization(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg0) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG0]]) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> - %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform> - func.return %0 : tensor<2x!quant.uniform> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = 
#vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ 
-96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : 
tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // 
CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, 
%arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : 
(!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: 
"vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ 
-1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> 
() -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: 
"vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { 
-@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2062,169 +2223,193 @@ - // ============ TYPES ============ - - // 
CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ 
// CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = 
"stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_collective_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -431,8 +467,9 @@ - } - - // CHECK-LABEL: "default_composite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_composite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.composite_v1"(%arg0) <{ -+ // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ - // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{}> - // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> - // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> -@@ -446,8 +483,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -475,8 +513,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -493,8 +532,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -513,8 +553,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> 
tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -522,8 +563,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -535,8 +577,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -564,8 +607,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -591,15 +635,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -620,8 +665,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = 
#vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -630,8 +676,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -639,8 +686,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -652,8 +700,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -665,8 +714,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -688,8 +738,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -711,8 +762,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -740,8 +792,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- 
// CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -769,8 +822,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -789,29 +843,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -827,8 +885,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -851,7 +910,7 @@ - - // CHECK-LABEL: "op_all_reduce_with_promotable_types" - func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -869,8 +928,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ 
// CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -888,22 +948,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -915,8 +978,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -928,8 +992,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, 
%arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -941,15 +1006,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -959,8 +1026,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -970,9 +1038,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -981,22 +1050,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -1006,22 
+1078,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -1033,8 +1108,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1046,15 +1122,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_composite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_composite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.composite_v1"(%arg0) <{ -+ // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ - // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{#vhlo.string_v1<"my_int"> = #vhlo.integer_v1<1 : i64>, #vhlo.string_v1<"my_string"> = #vhlo.string_v1<"foo">}> - // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> - // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> -@@ -1073,8 +1151,9 @@ - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) 
{ -@@ -1084,6 +1163,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1095,15 +1175,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1137,8 +1219,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1151,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1162,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1194,15 +1279,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1222,8 +1309,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1233,8 +1321,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1248,8 +1337,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1283,8 +1373,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1304,8 +1395,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1315,22 +1407,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1340,15 +1435,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1358,22 +1455,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1385,8 +1485,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1399,16 +1500,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1430,8 +1532,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1441,8 +1544,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1452,11 +1556,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1467,15 +1572,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: 
"vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1498,36 +1605,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1545,57 +1657,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1605,8 +1725,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1620,36 +1741,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: 
"vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1662,8 +1788,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1678,8 +1805,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1693,7 +1821,7 @@ - // CHECK_lABEL: "op_reduce_with_promotable_types" - func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) - -> (tensor<4xf64>) { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> -@@ -1708,8 +1836,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1734,7 +1863,7 @@ - - // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" - func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> -@@ -1751,8 +1880,9 @@ - - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1777,11 +1907,11 @@ - func.return %0 : tensor<2x9x16x7xf32> - } - --// CHECK_lABEL: "op_reduce_window_with_promotable_types" -+// CHECK-LABEL: "op_reduce_window_with_promotable_types" - func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, - %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> - (tensor<2x2xf64>, tensor<2x2xf32>) { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) -@@ -1801,8 +1931,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1822,16 +1953,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1840,8 +1973,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1851,8 +1985,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1862,8 +1997,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1873,29 +2009,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = 
#vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1928,7 +2068,7 @@ - func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, - %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> - tensor<200x100x300xf64> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> -@@ -1951,8 +2091,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1982,8 +2123,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () -@@ -2005,15 +2147,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -2026,8 +2170,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // 
CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -2037,43 +2182,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -2087,8 +2238,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -2108,29 +2260,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 
-+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -2142,8 +2298,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -2153,8 +2310,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -2164,8 +2322,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2181,15 +2340,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // 
CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2199,22 +2360,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2232,8 +2396,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2241,197 +2406,225 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], 
%[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, 
%arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_per_tensor_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_per_tensor_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_per_axis_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_per_axis_quantization(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg0) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG0]]) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> - %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform> - func.return %0 : tensor<2x!quant.uniform> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: 
"vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" +diff --ruN a/stablehlo/stablehlo/reference/Api.cpp b/stablehlo/stablehlo/reference/Api.cpp +--- stablehlo/stablehlo/reference/Api.cpp ++++ stablehlo/stablehlo/reference/Api.cpp +@@ -51,7 +51,7 @@ + auto functions = module.getOps(); + + for (auto funcOp : functions) +- if (funcOp.getSymName().equals(mainName)) return funcOp; ++ if (funcOp.getSymName() == mainName) return funcOp; + + bool isSingleFunction = + std::distance(functions.begin(), functions.end()) == 1; +@@ -68,7 +68,7 @@ + class DefaultInterpreterFallback : public InterpreterFallback { + public: + DefaultInterpreterFallback(const InterpreterConfiguration &config) +- : config(config){}; ++ : config(config) {}; + + virtual llvm::Error operator()(Operation &op, Scope &scope, + Process *process) final { +diff --ruN a/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp b/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp +--- stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp ++++ stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp +@@ -764,7 +764,7 @@ + + // Clean up operand buffers after refinement + // Must do in this pattern to avoid needing multiple refinement iterations +- if (op.getCallTargetName().equals(kCustomCallOperandBarrierTarget)) { ++ if (op.getCallTargetName() == kCustomCallOperandBarrierTarget) { + Value operand = op.getOperand(0); + if (operand.getType() == op.getResult(0).getType()) { + op.replaceAllUsesWith(ValueRange(operand)); diff --git a/third_party/stablehlo/workspace.bzl b/third_party/stablehlo/workspace.bzl index 6a72c8fa16885c..aaef166d96583c 100644 --- a/third_party/stablehlo/workspace.bzl +++ b/third_party/stablehlo/workspace.bzl @@ -4,8 +4,8 @@ load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): # LINT.IfChange - STABLEHLO_COMMIT = "8ba7728d3fdc3ea882e893ee7e53255c95ee0e5a" - STABLEHLO_SHA256 = "1dfc7179dc9200c3ab4ea85edbac4a35393866d8cd8694fcaac00c1d27036408" + STABLEHLO_COMMIT = "797bee217e1a041e9aac22cad4db207274596d94" + STABLEHLO_SHA256 = "e5619033e131ea2eeb9eab8c8e362f3ba12e111c6b4a15dac789ca216ff22c58" # LINT.ThenChange(Google-internal path) tf_http_archive( diff --git a/third_party/tf_runtime/workspace.bzl b/third_party/tf_runtime/workspace.bzl index 8cd9762125eee0..04d0e390c8dfe3 100644 --- a/third_party/tf_runtime/workspace.bzl +++ b/third_party/tf_runtime/workspace.bzl @@ -6,8 +6,8 @@ def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. 
- TFRT_COMMIT = "7bdf48f1aac0b48ff85a4e0fb5ff7f98a703f8d6" - TFRT_SHA256 = "dc02eeae8f6c3e72bf51cad0f04676eeefabe241389b6f08b8310c999eeb64b3" + TFRT_COMMIT = "edb0d2c6f5e343c83ea121817dc2599ad5453d5c" + TFRT_SHA256 = "97f7bfcbff025da3005e59b9ffe1bcb06b439874e3e2cd28a17d9287193d6901" tf_http_archive( name = "tf_runtime", diff --git a/third_party/triton/temporary/linear_layout_compose_asan.patch b/third_party/triton/temporary/linear_layout_compose_asan.patch new file mode 100644 index 00000000000000..eff83a166ac4a3 --- /dev/null +++ b/third_party/triton/temporary/linear_layout_compose_asan.patch @@ -0,0 +1,18 @@ +==== triton/lib/Tools/LinearLayout.cpp#2 - /google/src/cloud/shyshkov/triton_asan/triton/lib/Tools/LinearLayout.cpp ==== +# action=edit type=text +--- triton/lib/Tools/LinearLayout.cpp 2024-05-17 09:15:25.000000000 -0700 ++++ triton/lib/Tools/LinearLayout.cpp 2024-05-21 06:27:58.000000000 -0700 +@@ -397,9 +397,11 @@ + for (auto [outDim, b] : llvm::zip(getOutDimNames(), basis)) { + bases.push_back({outDim, b}); + } +- auto newBases = llvm::make_second_range(outer.apply(bases)); ++ ++ auto outerBases = ++ llvm::to_vector(llvm::make_second_range(outer.apply(bases))); + newInDimBases.push_back( +- std::vector(newBases.begin(), newBases.end())); ++ std::vector(outerBases.begin(), outerBases.end())); + } + } + return LinearLayout(std::move(newBases), outer.getOutDimNames()); diff --git a/third_party/triton/temporary/pipelining.patch b/third_party/triton/temporary/pipelining.patch deleted file mode 100644 index 9f5f36aeb5099d..00000000000000 --- a/third_party/triton/temporary/pipelining.patch +++ /dev/null @@ -1,472 +0,0 @@ -This is patching changes upstream from different PRs that fix issues with -pipelining internally. Required changes are upto and including this commit -https://github.com/openai/triton/commit/70f0b7b6e333fe2155c79dfa8bec6ad388073670 -The patch can be removed with the integration that includes these changes. - -diff --git a/include/triton/Analysis/Utility.h b/include/triton/Analysis/Utility.h ---- a/include/triton/Analysis/Utility.h -+++ b/include/triton/Analysis/Utility.h -@@ -8,6 +8,18 @@ - - namespace mlir { - -+inline bool isZeroConst(Value v) { -+ auto constantOp = v.getDefiningOp(); -+ if (!constantOp) -+ return false; -+ if (auto denseAttr = dyn_cast(constantOp.getValueAttr())) -+ return denseAttr.isSplat() && denseAttr.getSplatValue().isZero(); -+ if (auto denseAttr = -+ dyn_cast(constantOp.getValueAttr())) -+ return denseAttr.isSplat() && denseAttr.getSplatValue().isZero(); -+ return false; -+} -+ - class ReduceOpHelper { - public: - explicit ReduceOpHelper(triton::ReduceOp op) -diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td b/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td ---- a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td -+++ b/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td -@@ -45,6 +45,8 @@ def TTG_AsyncWaitOp : TTG_Op<"async_wait - - let arguments = (ins Variadic:$asyncToken, I32Attr:$num); - -+ let results = (outs TTG_AsyncToken:$retToken); -+ - let assemblyFormat = "$asyncToken attr-dict"; - - let extraClassDeclaration = [{ -@@ -229,10 +231,16 @@ def TTG_LocalLoadOp : TTG_Op<"local_load - let description = [{ - Load a tensor from the local memory descriptor into a distributed tensor. 
- }]; -- let arguments = (ins TT_MemDescType:$src); -+ let arguments = (ins TT_MemDescType:$src, Optional :$token); -+ -+ let builders = [ -+ OpBuilder<(ins "Type":$retType, "Value":$src), -+ [{ -+ build($_builder, $_state, retType, src, /*token=*/static_cast(nullptr)); -+ }]>]; - - // Use qualified() otherwise "!tt.memdesc" is printed as "". -- let assemblyFormat = [{$src attr-dict `:` qualified(type($src)) `->` type($result)}]; -+ let assemblyFormat = [{$src (`token` $token^)? attr-dict `:` qualified(type($src)) `->` type($result)}]; - - let results = (outs TT_Tensor:$result); - } -diff --git a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp b/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp ---- a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp -+++ b/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp -@@ -8,6 +8,7 @@ - #include "mlir/Interfaces/SideEffectInterfaces.h" - #include "mlir/Support/LLVM.h" - #include "triton/Analysis/AxisInfo.h" -+#include "triton/Analysis/Utility.h" - #include "triton/Dialect/Triton/IR/Types.h" - #include "triton/Dialect/Triton/IR/Utility.h" - #include "triton/Dialect/TritonGPU/IR/Attributes.h" -@@ -84,12 +85,13 @@ createAsyncCopy(scf::ForOp &forOp, tt::L - Location loc = loadOp.getLoc(); - Value src = loadOp.getPtr(); - Value mask = loadOp.getMask(); -+ Value other = loadOp.getOther(); - if (!isExpensiveLoadOrStore(loadOp) && opToInfo[loadOp].blockedEncoding) { - // For inexpensive loads that do not directly feed into dot ops - // we want to use optimal layout for the data. - ttg::BlockedEncodingAttr encoding = opToInfo[loadOp].blockedEncoding; - auto convertBlockLayout = [&](Value src) { -- auto ty = src.getType().cast(); -+ auto ty = cast(src.getType()); - auto newTy = - RankedTensorType::get(ty.getShape(), ty.getElementType(), encoding); - auto cvt = -@@ -99,9 +101,11 @@ createAsyncCopy(scf::ForOp &forOp, tt::L - src = convertBlockLayout(src); - if (mask) - mask = convertBlockLayout(mask); -+ if (other) -+ other = convertBlockLayout(other); - } - -- tt::MemDescType allocTy = alloc.getType().cast(); -+ tt::MemDescType allocTy = cast(alloc.getType()); - SmallVector copyOffsets(allocTy.getRank(), zero); - copyOffsets[0] = insertIdx; - tt::MemDescType subviewTy = tt::MemDescType::get( -@@ -110,11 +114,12 @@ createAsyncCopy(scf::ForOp &forOp, tt::L - auto view = - builder.create(loc, subviewTy, alloc, copyOffsets); - Operation *copy = builder.create( -- loc, src, view, mask, loadOp.getOther(), loadOp.getCache(), -- loadOp.getEvict(), loadOp.getIsVolatile()); -+ loc, src, view, mask, other, loadOp.getCache(), loadOp.getEvict(), -+ loadOp.getIsVolatile()); - Operation *commmit = - builder.create(loc, copy->getResult(0)); -- builder.create(loc, commmit->getResult(0), 0); -+ Operation *wait = -+ builder.create(loc, commmit->getResult(0), 0); - - int stage = opToInfo[loadOp].stage; - bool isMMV3Load = opToInfo[loadOp].loadIsMMAV3; -@@ -142,9 +147,21 @@ createAsyncCopy(scf::ForOp &forOp, tt::L - for (auto alloc : allocsToErase) { - alloc.erase(); - } -- auto sharedLoad = -- builder.create(loc, loadOp.getType(), viewLoad); -- loadOp->replaceAllUsesWith(sharedLoad->getResults()); -+ -+ auto sharedLoad = builder.create( -+ loc, loadOp.getType(), viewLoad, wait->getResult(0)); -+ auto result = sharedLoad->getResults(); -+ -+ // Create a select for non-zero other values as they are not handled by -+ // AsyncCopyGlobalToLocalOp for now. 
-+ Value other = loadOp.getOther(); -+ if (other && !isZeroConst(other)) { -+ auto select = builder.create( -+ loc, loadOp.getType(), mask, sharedLoad.getResult(), other); -+ result = select->getResults(); -+ } -+ -+ loadOp->replaceAllUsesWith(result); - } - loadOp.erase(); - } -@@ -160,7 +177,7 @@ getSharedEncIfAllUsersAreDotEnc(Value va - if (user->getNumResults() != 1) - return std::nullopt; - if (auto memDesc = -- user->getResult(0).getType().dyn_cast()) { -+ dyn_cast(user->getResult(0).getType())) { - // First time we find a shared encoding in the chain, save it and try to - // use it if it is compatible with the other users. - tempAttr = memDesc.getEncoding().cast(); -@@ -203,7 +220,7 @@ getSharedEncIfAllUsersAreDotEnc(Value va - static ttg::BlockedEncodingAttr - getBlockedEncoding(tt::LoadOp loadOp, tt::ModuleAxisInfoAnalysis &axisInfo) { - Value src = loadOp.getPtr(); -- auto ty = src.getType().cast(); -+ auto ty = cast(src.getType()); - auto mod = loadOp->getParentOfType(); - int numWarps = ttg::TritonGPUDialect::getNumWarps(mod); - int threadsPerWarp = ttg::TritonGPUDialect::getThreadsPerWarp(mod); -@@ -221,7 +238,7 @@ getBlockedEncoding(tt::LoadOp loadOp, tt - - static std::optional - getSharedEncoding(tt::LoadOp loadOp, bool isMMAV3) { -- auto ty = loadOp.getType().cast(); -+ auto ty = cast(loadOp.getType()); - auto ctaLayout = ttg::getCTALayout(ty.getEncoding()); - auto blockedOrder = ttg::getOrder(ty.getEncoding()); - SmallVector order; -@@ -285,11 +302,10 @@ loadOpsToDistanceAndUse(scf::ForOp forOp - if (auto mask = loadOp.getMask()) - vec = std::min(vec, axisInfoAnalysis.getMaskAlignment(mask)); - -- auto tensorTy = ptr.getType().dyn_cast(); -+ auto tensorTy = dyn_cast(ptr.getType()); - if (!tensorTy) - return false; -- auto ty = -- tensorTy.getElementType().cast().getPointeeType(); -+ auto ty = cast(tensorTy.getElementType()).getPointeeType(); - unsigned width = vec * ty.getIntOrFloatBitWidth(); - - // We do not pipeline all loads for the following reasons: -@@ -353,7 +369,7 @@ static bool loadIsMMAv3(tt::LoadOp loadO - - // MMA V3 case. - auto newOrder = sharedEnc.getOrder(); -- auto ty = loadOp.getType().cast(); -+ auto ty = cast(loadOp.getType()); - auto oldOrder = ttg::getOrder(ty.getEncoding()); - - // The operand of MMAv3 is in SharedEncoding and its order should not -@@ -497,7 +513,7 @@ collectOpsToPipeline(scf::ForOp forOp, - static Value createAlloc(scf::ForOp &forOp, tt::LoadOp loadOp, - ttg::SharedEncodingAttr sharedEnc, unsigned distance) { - OpBuilder builder(forOp); -- auto ty = loadOp.getType().cast(); -+ auto ty = cast(loadOp.getType()); - SmallVector bufferShape(ty.getShape().begin(), ty.getShape().end()); - bufferShape.insert(bufferShape.begin(), distance); - Type memdescType = mlir::triton::MemDescType::get( -@@ -669,12 +685,23 @@ createSchedule(scf::ForOp forOp, int num - } - }); - -+ auto getNestedOperands = [](Operation *op) -> SmallVector { -+ SmallVector operands; -+ op->walk([&](Operation *nestedOp) { -+ for (Value operand : nestedOp->getOperands()) { -+ if (operand.getParentBlock()->getParentOp()->isAncestor(nestedOp)) -+ operands.push_back(operand); -+ } -+ }); -+ return operands; -+ }; -+ - // Find dependencies with distance of 1. 
- SmallVector> distanceOneUsers(numStages); - for (int stage = 0; stage < numStages - 1; stage++) { - auto &group = insertAndDeps[stage]; - for (Operation *op : group) { -- for (Value operand : op->getOperands()) { -+ for (Value operand : getNestedOperands(op)) { - if (auto arg = operand.dyn_cast()) { - if (arg.getArgNumber() > 0 && arg.getOwner() == op->getBlock()) { - auto yieldOp = op->getBlock()->getTerminator(); -@@ -905,7 +932,7 @@ static int minNumInterleavedCommitOps(Op - // Look for consecutive wait ops and combine them into a single wait op. - static void - combineRedundantWaitOps(llvm::SmallSetVector &waitOps) { -- llvm::SmallSetVector toDelete; -+ llvm::MapVector toDelete; - for (auto waitOp : waitOps) { - if (toDelete.count(waitOp)) - continue; -@@ -927,10 +954,13 @@ combineRedundantWaitOps(llvm::SmallSetVe - OpBuilder builder(waitGroup.back()); - auto newWaitOp = builder.create(waitOp.getLoc(), - depTokens, minWaitNumber); -- toDelete.insert(waitGroup.begin(), waitGroup.end()); -+ for (auto waitOp : waitGroup) { -+ toDelete[waitOp] = newWaitOp; -+ } - } - for (auto waitOp : toDelete) { -- waitOp->erase(); -+ waitOp.first->replaceAllUsesWith(waitOp.second); -+ waitOp.first->erase(); - } - } - -@@ -1010,7 +1040,7 @@ static void threadValuesThroughWait(ttng - - for (ttng::DotAsyncOp dot : asyncDots) { - for (Value operand : dot.getOperands()) { -- if (operand.getType().isa()) { -+ if (isa(operand.getType())) { - newOperands.insert(operand); - } - } -@@ -1020,15 +1050,21 @@ static void threadValuesThroughWait(ttng - // values in the operation. - auto newWait = builder.create( - wait.getLoc(), llvm::to_vector(newOperands), wait.getPendings()); -+ -+ auto dominatedByNewWait = [&](OpOperand &operand) { -+ auto opInThisBlock = -+ newWait->getBlock()->findAncestorOpInBlock(*operand.getOwner()); -+ return opInThisBlock && newWait->isBeforeInBlock(opInThisBlock); -+ }; - for (int i = 0; i < origNumOperands; i++) { - Value operand = wait.getResult(i); -- if (!operand.getType().isa()) -+ if (!isa(operand.getType())) - operand.replaceAllUsesWith(newWait.getResult(i)); - } - for (int i = origNumOperands; i < newOperands.size(); i++) { - Value operand = newWait.getOperand(i); -- if (!operand.getType().isa()) -- operand.replaceAllUsesExcept(newWait.getResult(i), newWait); -+ if (!isa(operand.getType())) -+ operand.replaceUsesWithIf(newWait.getResult(i), dominatedByNewWait); - } - wait->erase(); - } -@@ -1047,8 +1083,8 @@ static void threadValuesThroughWait(ttng - // 1. All operands that touch shared memory are multi-buffered, i.e. can't read - // an incomplete value while it's being written asynchronously by a load. - // --// 2. During iteration i, nothing other than the loop's `yield` reads the --// result of the dot. -+// 2. If the dot is used by any op in the loop, it must be used under an `if`, -+// and will be synced with a `wait 0` at the beginning of the `if` block. - // - // 3. During iteration i, between the start of the loop up until the first - // `ttng.dot_wait {pendings=0}` op, the result of the dot from iteration i-1 -@@ -1079,7 +1115,7 @@ static std::optional dotCanBeProper - // Rule 1: All shmem operands are multi-buffered. - auto checkOperand = [&](Value operand) { - if (!isa( -- operand.getType().cast().getEncoding())) { -+ cast(operand.getType()).getEncoding())) { - return true; - } - -@@ -1103,17 +1139,41 @@ static std::optional dotCanBeProper - return std::nullopt; - } - -- // Rule 2: The dot should only be used by the for loop's `yield`. 
-- if (!dotOp->hasOneUse() || -- *dotOp->getUsers().begin() != forOp.getBody()->getTerminator()) { -- LDBG("Can't make dot async because it is not used only by the loop's " -- "`yield`."); -- return std::nullopt; -+ // Rule 2: The dot cannot be unconditionally used by any op in the loop. -+ // Uses under `if` are allowed, as can be explicitly synced with a `wait 0`. -+ int iterArgIdx = -1; -+ Value iterArg = nullptr; -+ SmallVector> queue; -+ for (auto &use : dotOp->getUses()) { -+ queue.push_back({use.getOwner(), use.getOperandNumber()}); - } -- -- // The result of the dot becomes this loop carry value. -- auto iterArgIdx = dotOp->getUses().begin()->getOperandNumber(); -- auto iterArg = forOp.getRegionIterArg(iterArgIdx); -+ while (!queue.empty()) { -+ auto [user, argIdx] = queue.pop_back_val(); -+ if (user->getParentOp() == forOp) { -+ if (isa(user)) { -+ if (iterArg) { -+ // The dot is used by the loop's yield, but we can't have any other -+ // uses. -+ return std::nullopt; -+ } -+ iterArgIdx = argIdx; -+ iterArg = forOp.getRegionIterArg(argIdx); -+ continue; -+ } -+ return std::nullopt; -+ } -+ if (auto ifOp = dyn_cast(user->getParentOp())) { -+ if (isa(user)) { -+ // The result is returned by the if, follow it further. -+ auto uses = ifOp.getResult(argIdx).getUses(); -+ for (auto &use : uses) { -+ queue.push_back({use.getOwner(), use.getOperandNumber()}); -+ } -+ } -+ } else { -+ return std::nullopt; -+ } -+ } - - // Rule 3a: Are the only users of the dot's result from iteration i-1 other - // MMAv3 dots? If so, we're done, this dot can be properly async. -@@ -1181,6 +1241,32 @@ static void insertAsyncDotWaitInLoop( - return; - } - -+ // Insert waits before the users of the properly async dots other than loop -+ // yield. -+ for (auto [asyncDot, iterArgIdx] : properlyAsyncDots) { -+ SmallVector uses; -+ for (auto &use : asyncDot->getUses()) { -+ if (auto yieldOp = dyn_cast(use.getOwner())) { -+ continue; -+ } -+ uses.push_back(&use); -+ } -+ -+ DenseMap> blockToUsers; -+ for (auto use : uses) { -+ auto block = use->getOwner()->getBlock(); -+ blockToUsers[block].push_back(use->get()); -+ } -+ -+ for (auto [block, users] : blockToUsers) { -+ OpBuilder builder(block, block->begin()); -+ auto newWait = builder.create(asyncDot->getLoc(), -+ ArrayRef{}, 0); -+ -+ threadValuesThroughWait(newWait, users); -+ } -+ } -+ - // Add the wait right after the last properly-async dot. This only needs to - // wait for all properly-async dots from the i-1'th iteration to complete, IOW - // we wait until there are most `asyncDots.size()` dots in flight. 
-diff --git a/test/TritonGPU/loop-pipeline.mlir b/test/TritonGPU/loop-pipeline.mlir ---- a/test/TritonGPU/loop-pipeline.mlir -+++ b/test/TritonGPU/loop-pipeline.mlir -@@ -349,16 +349,21 @@ tt.func @indirect_bmm_scalar_dist_one(%7 - // CHECK: triton_gpu.async_copy_global_to_local - // CHECK: triton_gpu.async_copy_global_to_local - // CHECK: triton_gpu.async_commit_group -+// CHECK: triton_gpu.async_wait {{.*}} {num = 1 : i32} -+// CHECK: scf.for -+// CHECK: tt.dot - // CHECK: %[[NEXT_BUFFER_1:.*]] = tt.addptr %{{.*}}, {{.*}} - // CHECK: triton_gpu.async_copy_global_to_local %[[NEXT_BUFFER_1]] --// CHECK: %[[IND_BUFFER_0:.*]] = triton_gpu.memdesc_subview --// CHECK: %[[IND_BUFFER_1:.*]] = triton_gpu.local_load %[[IND_BUFFER_0]] -+// CHECK-DAG: %[[IND_BUFFER_WAIT_TOKEN:.*]] = triton_gpu.async_wait {{.*}} {num = 1 : i32} -+// CHECK-DAG: %[[IND_BUFFER_0:.*]] = triton_gpu.memdesc_subview -+// CHECK: %[[IND_BUFFER_1:.*]] = triton_gpu.local_load %[[IND_BUFFER_0]] token %[[IND_BUFFER_WAIT_TOKEN]] - // CHECK: %[[IND_BUFFER_2:.*]] = tt.expand_dims %[[IND_BUFFER_1]] {axis = 1 : i32} - // CHECK: %[[IND_BUFFER_3:.*]] = tt.broadcast %[[IND_BUFFER_2]] - // CHECK: %[[IND_BUFFER_4:.*]] = arith.muli {{.*}}, %[[IND_BUFFER_3]] - // CHECK: %[[NEXT_BUFFER_0:.*]] = tt.addptr {{.*}}, %[[IND_BUFFER_4]] - // CHECK: triton_gpu.async_copy_global_to_local %[[NEXT_BUFFER_0]] - // CHECK: triton_gpu.async_wait {{.*}} {num = 1 : i32} -+// CHECK: scf.yield - tt.func @indirect_bmm_vector(%77: tensor<16x16xi64, #BL> {tt.divisibility=16: i32, tt.constancy=16: i32}, - %76: index, - %49: tensor<16x16x!tt.ptr, #AL> {tt.divisibility=16: i32, tt.contiguity=2 : i32}, -diff --git a/test/TritonGPU/reorder-instructions.mlir b/test/TritonGPU/reorder-instructions.mlir ---- a/test/TritonGPU/reorder-instructions.mlir -+++ b/test/TritonGPU/reorder-instructions.mlir -@@ -28,7 +28,7 @@ module attributes {"triton_gpu.num-warps - // CHECK: triton_gpu.async_wait {num = 0 : i32} - // CHECK: triton_gpu.local_dealloc %0 : !tt.memdesc<4x128x64xf16, #shared> - // CHECK: triton_gpu.local_dealloc %1 : !tt.memdesc<4x128x64xf16, #shared> --// CHECK: %2 = triton_gpu.convert_layout %arg0 : tensor<32x32xf32, #blocked> -> tensor<32x32xf32, #blocked1> -+// CHECK: %3 = triton_gpu.convert_layout %arg0 : tensor<32x32xf32, #blocked> -> tensor<32x32xf32, #blocked1> - #blocked = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [1, 4], order = [0, 1]}> - #blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [1, 4], order = [1, 0]}> - #shared = #triton_gpu.shared<{vec = 8, perPhase = 1, maxPhase = 4, order = [0, 1]}> -diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp ---- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp -+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp -@@ -333,17 +333,6 @@ static Value faddAccumulate(ConversionPa - return newStruct; - } - --static bool isZero(Value v) { -- auto constantOp = v.getDefiningOp(); -- if (!constantOp) -- return false; -- if (auto denseAttr = dyn_cast(constantOp.getValueAttr())) -- return denseAttr.isSplat() && denseAttr.getSplatValue().isZero(); -- if (auto denseAttr = -- dyn_cast(constantOp.getValueAttr())) -- return denseAttr.isSplat() && denseAttr.getSplatValue().isZero(); -- return false; --} - - static SmallVector emitWait(ConversionPatternRewriter &rewriter, - Location loc, SmallVector acc, -@@ 
-402,7 +391,7 @@ LogicalResult convertDot(const LLVMTypeC - int M = 4 * instrShape[0]; - int N = instrShape[1]; - int K = instrShape[2]; -- bool zeroAcc = isZero(c); -+ bool zeroAcc = isZeroConst(c); - auto shapePerCTATile = getShapePerCTATile(mmaEncoding); - int numRepM = ceil(dShapePerCTA[0], shapePerCTATile[0]); - int numRepN = ceil(dShapePerCTA[1], shapePerCTATile[1]); -diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp ---- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp -+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp -@@ -924,8 +924,11 @@ struct AsyncWaitOpConversion - auto voidTy = void_ty(ctx); - ptxBuilder.launch(rewriter, loc, voidTy); - -- // Safe to remove the op since it doesn't have any return value. -- rewriter.eraseOp(op); -+ // Drop the result token. -+ Value zero = rewriter.create( -+ op.getLoc(), IntegerType::get(op.getContext(), 32), -+ rewriter.getI32IntegerAttr(0)); -+ rewriter.replaceOp(op, zero); - return success(); - } - }; diff --git a/third_party/triton/temporary/series.bzl b/third_party/triton/temporary/series.bzl index b6002f83e1d7bb..b3d935c048fadd 100644 --- a/third_party/triton/temporary/series.bzl +++ b/third_party/triton/temporary/series.bzl @@ -6,6 +6,5 @@ internal patch during the next triton integration process. """ temporary_patch_list = [ - "//third_party/triton/temporary:pipelining.patch", - "//third_party/triton/temporary:support_ceil_op.patch", + "//third_party/triton/temporary:linear_layout_compose_asan.patch", ] diff --git a/third_party/triton/temporary/support_ceil_op.patch b/third_party/triton/temporary/support_ceil_op.patch deleted file mode 100644 index 71b323d9fccdca..00000000000000 --- a/third_party/triton/temporary/support_ceil_op.patch +++ /dev/null @@ -1,138 +0,0 @@ -Cherry-picking https://github.com/openai/triton/commit/62706e8c518c8c56e56460a43732d8e375217860 -until the next integration lands it. Can be removed as it is already merged. 
- -diff --git a/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp b/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp ---- a/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp -+++ b/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp -@@ -805,6 +805,7 @@ void mlir::triton::populateElementwiseOp - POPULATE_UNARY_OP(arith::FPToUIOp, LLVM::FPToUIOp) - POPULATE_UNARY_OP(arith::UIToFPOp, LLVM::UIToFPOp) - POPULATE_UNARY_OP(math::FloorOp, math::FloorOp) -+ POPULATE_UNARY_OP(math::CeilOp, math::CeilOp) - POPULATE_UNARY_OP(math::LogOp, math::LogOp) - POPULATE_UNARY_OP(math::Log2Op, math::Log2Op) - POPULATE_UNARY_OP(math::CosOp, math::CosOp) -diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp ---- a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp -+++ b/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp -@@ -125,12 +125,13 @@ void populateMathPatternsAndLegality(Tri - MLIRContext *context = patterns.getContext(); - // Rewrite rule - patterns.add, GenericOpPattern, -- GenericOpPattern, GenericOpPattern, -- GenericOpPattern, GenericOpPattern, -- GenericOpPattern, GenericOpPattern, -- GenericOpPattern, GenericOpPattern, -- GenericOpPattern, GenericOpPattern, -- GenericOpPattern>(typeConverter, context); -+ GenericOpPattern, GenericOpPattern, -+ GenericOpPattern, GenericOpPattern, -+ GenericOpPattern, GenericOpPattern, -+ GenericOpPattern, GenericOpPattern, -+ GenericOpPattern, GenericOpPattern, -+ GenericOpPattern, GenericOpPattern>( -+ typeConverter, context); - } - - // -diff --git a/lib/Dialect/TritonNvidiaGPU/Transforms/PlanCTA.cpp b/lib/Dialect/TritonNvidiaGPU/Transforms/PlanCTA.cpp ---- a/lib/Dialect/TritonNvidiaGPU/Transforms/PlanCTA.cpp -+++ b/lib/Dialect/TritonNvidiaGPU/Transforms/PlanCTA.cpp -@@ -651,10 +651,9 @@ bool CTAPlanner::isElementwiseOp(Operati - math::CeilOp, math::CopySignOp, math::CosOp, math::SinOp, - math::CountLeadingZerosOp, math::CountTrailingZerosOp, - math::CtPopOp, math::ErfOp, math::ExpOp, math::Exp2Op, -- math::FloorOp, math::ExpM1Op, math::FloorOp, math::FmaOp, -- math::LogOp, math::Log10Op, math::Log1pOp, math::Log2Op, -- math::PowFOp, math::RsqrtOp, math::SqrtOp, math::RsqrtOp, -- math::TanhOp>(op)) -+ math::FloorOp, math::ExpM1Op, math::FmaOp, math::LogOp, -+ math::Log10Op, math::Log1pOp, math::Log2Op, math::PowFOp, -+ math::RsqrtOp, math::SqrtOp, math::RsqrtOp, math::TanhOp>(op)) - return true; - if (llvm::isa Value { - return self.create(val); - }) -+ .def("create_ceil", -+ [](TritonOpBuilder &self, Value &val) -> Value { -+ return self.create(val); -+ }) - .def("create_exp", - [](TritonOpBuilder &self, Value &val) -> Value { - return self.create(val); -diff --git a/python/test/unit/language/test_core.py b/python/test/unit/language/test_core.py ---- a/python/test/unit/language/test_core.py -+++ b/python/test/unit/language/test_core.py -@@ -915,10 +915,11 @@ def test_unary_op(dtype_x, expr, num_cta - - - @pytest.mark.interpreter --@pytest.mark.parametrize("dtype_x, expr, x", [(dtype_x, expr, x) -- for dtype_x in ["float32", "float64"] -- for expr in ['exp', 'log', 'cos', 'sin', 'exp2', 'log2', 'sqrt', 'floor'] -- for x in ['x', '3.0']]) -+@pytest.mark.parametrize("dtype_x, expr, x", -+ [(dtype_x, expr, x) -+ for dtype_x in ["float32", "float64"] -+ for expr in ['exp', 'log', 'cos', 'sin', 'exp2', 'log2', 'sqrt', 'floor', 'ceil'] -+ for x in ['x', '3.0']]) - def test_math_op(dtype_x, expr, x, device): - _test_unary(dtype_x, f'tl.{expr}({x})', 
f'np.{expr}({x}) ', device=device) - -diff --git a/python/triton/language/__init__.py b/python/triton/language/__init__.py ---- a/python/triton/language/__init__.py -+++ b/python/triton/language/__init__.py -@@ -102,7 +102,8 @@ from .core import ( - void, - where, - ) --from .math import (umulhi, exp, exp2, fma, log, log2, cos, rsqrt, sin, sqrt, sqrt_rn, abs, fdiv, div_rn, erf, floor) -+from .math import (umulhi, exp, exp2, fma, log, log2, cos, rsqrt, sin, sqrt, sqrt_rn, abs, fdiv, div_rn, erf, floor, -+ ceil) - from .random import ( - pair_uniform_to_normal, - philox, -@@ -142,6 +143,7 @@ from .random import ( - "builtin", - "cat", - "cdiv", -+ "ceil", - "clamp", - "const", - "const_pointer_type", -diff --git a/python/triton/language/math.py b/python/triton/language/math.py ---- a/python/triton/language/math.py -+++ b/python/triton/language/math.py -@@ -230,6 +230,15 @@ def floor(x, _builder=None): - - - @core.builtin -+@_check_dtype(dtypes=["fp32", "fp64"]) -+@_add_math_1arg_docstr("ceil") -+@core._tensor_member_fn -+def ceil(x, _builder=None): -+ x = core._to_tensor(x, _builder) -+ return core.tensor(_builder.create_ceil(x.handle), x.type) -+ -+ -+@core.builtin - @_add_math_3arg_docstr("fused multiply-add") - def fma(x, y, z, _builder=None): - x = core._to_tensor(x, _builder) -diff --git a/python/triton/runtime/interpreter.py b/python/triton/runtime/interpreter.py ---- a/python/triton/runtime/interpreter.py -+++ b/python/triton/runtime/interpreter.py -@@ -391,6 +391,7 @@ class InterpreterBuilder: - create_fabs = lambda self, arg: self.unary_op(arg, np.abs) - create_iabs = lambda self, arg: self.unary_op(arg, np.abs) - create_floor = lambda self, arg: self.unary_op(arg, np.floor) -+ create_ceil = lambda self, arg: self.unary_op(arg, np.ceil) - create_log = lambda self, arg: self.unary_op(arg, np.log) - create_log2 = lambda self, arg: self.unary_op(arg, np.log2) - create_precise_sqrt = lambda self, arg: self.unary_op(arg, np.sqrt) diff --git a/third_party/triton/workspace.bzl b/third_party/triton/workspace.bzl index 45daf7974a022e..a257f1f3e44645 100644 --- a/third_party/triton/workspace.bzl +++ b/third_party/triton/workspace.bzl @@ -8,8 +8,8 @@ load("//third_party/triton/xla_extensions:series.bzl", "extensions_files_patch_l def repo(): """Imports Triton.""" - TRITON_COMMIT = "cl623533461" - TRITON_SHA256 = "7aa74e82e4417a91fc7a7a84b4f6ad2b7e4e58512758d6c78ca3cd1c8771326b" + TRITON_COMMIT = "cl634675237" + TRITON_SHA256 = "7151d057ee8443c2f45cbe18a7435a42f37e18f562e5d238b844b6e09fc560e6" tf_http_archive( name = "triton", sha256 = TRITON_SHA256, diff --git a/third_party/triton/xla_extensions/env_vars.patch b/third_party/triton/xla_extensions/env_vars.patch deleted file mode 100644 index 955eb6db8da68e..00000000000000 --- a/third_party/triton/xla_extensions/env_vars.patch +++ /dev/null @@ -1,14 +0,0 @@ -Long standing patch due to licensing issues. 
-diff --git a/include/triton/Tools/Sys/GetEnv.hpp b/include/triton/Tools/Sys/GetEnv.hpp -index 31bc03fe1..a19a432df 100644 ---- a/include/triton/Tools/Sys/GetEnv.hpp -+++ b/include/triton/Tools/Sys/GetEnv.hpp -@@ -34,7 +34,7 @@ inline const std::set ENV_VARS = { - "AMDGCN_ENABLE_DUMP", - "DISABLE_FAST_REDUCTION", - "DISABLE_LLVM_OPT", -- "DISABLE_MMA_V3", -+ "ENABLE_MMA_V3", - "DISABLE_PTXAS_OPT", - "LLVM_IR_ENABLE_DUMP", - "MLIR_ENABLE_DUMP", diff --git a/third_party/triton/xla_extensions/series.bzl b/third_party/triton/xla_extensions/series.bzl index b858da203fb094..af524fb253cbef 100644 --- a/third_party/triton/xla_extensions/series.bzl +++ b/third_party/triton/xla_extensions/series.bzl @@ -4,7 +4,6 @@ applied in the previous copybara workflow. """ extensions_files_patch_list = [ - "//third_party/triton/xla_extensions:env_vars.patch", # File not exported to google "//third_party/triton/xla_extensions:sparse_dot_nvgpu.patch", # Sparsity internal patch "//third_party/triton/xla_extensions:sparse_dot_base.patch", # Sparsity internal patch "//third_party/triton/xla_extensions:sparse_dot_passes.patch", # Sparsity internal patch diff --git a/third_party/triton/xla_extensions/sparse_dot_base.patch b/third_party/triton/xla_extensions/sparse_dot_base.patch index dcacd99740b18f..08b7dd6f7ada87 100644 --- a/third_party/triton/xla_extensions/sparse_dot_base.patch +++ b/third_party/triton/xla_extensions/sparse_dot_base.patch @@ -1,8 +1,9 @@ diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td b/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td +index 56f0b6b49..aa91ea9b8 100644 --- a/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td +++ b/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td -@@ -1158,4 +1158,12 @@ section 9.7.13.4.1 for more details. - let extraClassDeclaration = extraDistributedDeclaration; +@@ -1262,4 +1262,16 @@ section 9.7.13.4.1 for more details. 
+ }]; } +def SparseDotMetaEncodingAttr : DistributedEncoding<"SparseDotMetaEncoding", "sparse_dot_meta_encoding"> { @@ -10,14 +11,19 @@ diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td b/include/ + + let parameters = (ins "Attribute":$parent); + let assemblyFormat = "`<``{` struct(params) `}``>`"; -+ let extraClassDeclaration = extraDistributedDeclaration; ++ let extraClassDeclaration = extraDistributedDeclaration # [{ ++ SmallVector getContigPerThread() { ++ return getSizePerThread(); ++ }; ++ }]; +} + #endif diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td b/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td +index 4966a5f73..d2bb33cfa 100644 --- a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td +++ b/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td -@@ -7,6 +7,7 @@ include "triton/Dialect/TritonGPU/IR/Tri +@@ -7,6 +7,7 @@ include "triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td" include "mlir/Dialect/Arith/IR/ArithBase.td" include "triton/Dialect/Triton/IR/TritonTypes.td" include "triton/Dialect/Triton/IR/TritonAttrDefs.td" @@ -25,8 +31,8 @@ diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td b/include/trito include "mlir/IR/OpBase.td" include "mlir/Interfaces/SideEffectInterfaces.td" // Pure include "mlir/Interfaces/InferTypeOpInterface.td" // SameOperandsAndResultType -@@ -214,4 +215,19 @@ def TTG_LocalLoadOp : TTG_Op<"local_load - let results = (outs TT_Tensor:$result); +@@ -232,4 +233,19 @@ def TTG_LocalStoreOp : TTG_Op<"local_store", [MemoryEffects<[MemWrite shape, return encoding; } @@ -70,7 +77,7 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia + +LogicalResult SparseDotOp::verify() { + // Verify operand A. -+ auto aTensorTy = getOperand(0).getType().cast(); ++ auto aTensorTy = cast(getOperand(0).getType()); + auto aElemTy = aTensorTy.getElementType(); + if (!aElemTy.isF16() && !aElemTy.isBF16()) + return emitError("element type of operand A is not supported"); @@ -78,7 +85,7 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia + if (aShape.size() != 2) return emitError("shape of operand A is incorrect"); + + // Verify operand B. -+ auto bTensorTy = getOperand(1).getType().cast(); ++ auto bTensorTy = cast(getOperand(1).getType()); + auto bElemTy = bTensorTy.getElementType(); + if (!bElemTy.isF16() && !bElemTy.isBF16()) + return emitError("element type of operand B is not supported"); @@ -86,7 +93,7 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia + if (bShape.size() != 2) return emitError("shape of operand B is incorrect"); + + // Verify operand C. -+ auto cTensorTy = getOperand(2).getType().cast(); ++ auto cTensorTy = cast(getOperand(2).getType()); + auto cElemTy = cTensorTy.getElementType(); + if (!cElemTy.isF32()) + return emitError("element type of operand C is not supported"); @@ -101,7 +108,7 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia + return emitError("operand element types do not match"); + + // Verify sparse metadata. 
-+ auto metaTy = getOperand(3).getType().cast(); ++ auto metaTy = cast(getOperand(3).getType()); + auto metaShape = metaTy.getShape(); + if (!metaTy.getElementType().isInteger(16) || metaShape.size() != 2) + return emitError("sparse metadata tensor is invalid"); @@ -125,7 +132,7 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia +//--- SparseDotMetaEncodingAttr --- +unsigned SparseDotMetaEncodingAttr::getTotalElemsPerThread( + ArrayRef shape, Type eltTy) const { -+ auto mmaLayout = getParent().cast(); ++ auto mmaLayout = mlir::cast(getParent()); + return product(shape) / + (mmaLayout.getWarpsPerCTA()[0] * kMetadataElementsPerWarp); +} @@ -169,9 +176,10 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia } // namespace triton } // namespace mlir diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM.cpp +index f8ece0f1c..435610817 100644 --- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM.cpp +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM.cpp -@@ -38,6 +38,14 @@ Value convertLayout(int opIdx, Conversio +@@ -43,6 +43,14 @@ Value convertLayout(int opIdx, ConversionPatternRewriter &rewriter, const LLVMTypeConverter *typeConverter, Value thread); } @@ -185,19 +193,19 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM. + namespace { - struct LocalLoadOpConversion -@@ -59,6 +67,10 @@ public: - .isa()) { + using namespace mlir; +@@ -67,6 +75,10 @@ public: + cast(dstLayout).getParent())) { return lowerSharedToDotOperand(op, adaptor, getTypeConverter(), rewriter); } -+ if (srcLayout.isa() && -+ dstLayout.isa()) { ++ if (isa(srcLayout) && ++ isa(dstLayout)) { + return lowerSharedToSparseMeta(op, adaptor, getTypeConverter(), rewriter); + } return failure(); } -@@ -130,6 +142,29 @@ private: +@@ -138,6 +150,26 @@ private: rewriter.replaceOp(op, res); return success(); } @@ -208,13 +216,10 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM. + const LLVMTypeConverter *typeConverter, + ConversionPatternRewriter &rewriter) const { + auto loc = op.getLoc(); -+ auto sparseEncoding = op.getResult() -+ .getType() -+ .cast() -+ .getEncoding() -+ .cast(); ++ auto sparseEncoding = cast( ++ cast(op.getResult().getType()).getEncoding()); + auto llvmElemTy = typeConverter->convertType( -+ op.getSrc().getType().cast().getElementType()); ++ cast(op.getSrc().getType()).getElementType()); + auto smemObj = getSharedMemoryObjectFromStruct(loc, adaptor.getSrc(), + llvmElemTy, rewriter); + Value res = SharedToSparseDotOperand::convertLayout( @@ -229,6 +234,7 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM. struct ConvertLayoutOpOptimizedConversion diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp new file mode 100644 +index 000000000..3011cf73d --- /dev/null +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp @@ -0,0 +1,69 @@ @@ -255,7 +261,7 @@ new file mode 100644 + Value thread) { + // Calculate tile size as number of mask elements (4xi4). 
+ NvidiaMmaEncodingAttr mmaLayout = -+ sparseEncoding.getParent().cast(); ++ cast(sparseEncoding.getParent()); + SmallVector shapePerCTATile = { + kTileSize * mmaLayout.getWarpsPerCTA()[0], + kTileSize / kMetadataElementsPerPackedValue}; @@ -272,7 +278,7 @@ new file mode 100644 + Value rowId = add(mul(warpGroupId, i32_val(kTileSize)), laneGroupId); + + // Calculate number of tile repetitions. -+ auto shape = tensor.getType().cast().getShape(); ++ auto shape = cast(tensor.getType()).getShape(); + int repM = shape[0] / shapePerCTATile[0]; + int repK = shape[1] / shapePerCTATile[1]; + assert(repM > 0 && repK > 0); @@ -302,9 +308,10 @@ new file mode 100644 +} +} // namespace SharedToSparseDotOperand diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp +index 374b9ec9e..1601806b4 100644 --- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp -@@ -32,6 +32,12 @@ LogicalResult convertAsyncWGMMA(triton:: +@@ -32,6 +32,12 @@ LogicalResult convertAsyncWGMMA(triton::nvidia_gpu::DotAsyncOp op, const LLVMTypeConverter *typeConverter, ConversionPatternRewriter &rewriter, Value thread); @@ -317,7 +324,7 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp b/thir namespace { struct DotOpConversion : public ConvertOpToLLVMPattern { using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; -@@ -180,6 +186,18 @@ struct DotWaitOpConversion +@@ -174,6 +180,18 @@ struct DotWaitOpConversion return success(); } }; @@ -336,7 +343,7 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp b/thir } // namespace void mlir::triton::NVIDIA::populateDotOpToLLVMPatterns( -@@ -188,4 +206,5 @@ void mlir::triton::NVIDIA::populateDotOp +@@ -182,4 +200,5 @@ void mlir::triton::NVIDIA::populateDotOpToLLVMPatterns( patterns.add(typeConverter, benefit); patterns.add(typeConverter, benefit); patterns.add(typeConverter, benefit); @@ -344,6 +351,7 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp b/thir } diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/Sparse.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/Sparse.cpp new file mode 100644 +index 000000000..34d9212d2 --- /dev/null +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/Sparse.cpp @@ -0,0 +1,339 @@ @@ -397,15 +405,15 @@ new file mode 100644 + const LLVMTypeConverter *typeConverter, + ConversionPatternRewriter &rewriter) { + // Get number of repetitions across the dimensions. -+ auto aTensorTy = op.getA().getType().cast(); -+ auto bTensorTy = op.getB().getType().cast(); ++ auto aTensorTy = cast(op.getA().getType()); ++ auto bTensorTy = cast(op.getB().getType()); + -+ auto layoutA = aTensorTy.getEncoding().dyn_cast(); -+ auto layoutB = bTensorTy.getEncoding().dyn_cast(); ++ auto layoutA = dyn_cast(aTensorTy.getEncoding()); ++ auto layoutB = dyn_cast(bTensorTy.getEncoding()); + assert(layoutA != nullptr && layoutB != nullptr); + + int bitwidth = aTensorTy.getElementType().getIntOrFloatBitWidth(); -+ auto mmaEnc = layoutA.getParent().cast(); ++ auto mmaEnc = cast(layoutA.getParent()); + auto repA = mmaEnc.getMMAv2Rep(triton::gpu::getShapePerCTA(aTensorTy), + bitwidth, layoutA.getOpIdx()); + auto repB = mmaEnc.getMMAv2Rep(triton::gpu::getShapePerCTA(bTensorTy), @@ -435,7 +443,7 @@ new file mode 100644 + } + + // Flatten accumulator values. 
-+ auto dTensorTy = op.getD().getType().cast(); ++ auto dTensorTy = cast(op.getD().getType()); + auto fc = unpackLLElements(loc, adaptor.getC(), rewriter); + + // Create `mma.sp` instruction for 4/8 core matrices. @@ -512,7 +520,7 @@ new file mode 100644 + Location loc, std::vector instrShape, + bool trans, int dimWpt, Value warpId, MemDescType tensorTy, + Value baseDesc, int minor) { -+ auto sharedLayout = tensorTy.getEncoding().cast(); ++ auto sharedLayout = cast(tensorTy.getEncoding()); + int elemBytes = tensorTy.getElementTypeBitWidth() / 8; + int elemsPerSwizzlingRow = + kMmaLineSize / sharedLayout.getPerPhase() / elemBytes; @@ -541,10 +549,10 @@ new file mode 100644 + ConversionPatternRewriter &rewriter, + Value thread) { + // Get number of repetitions across the dimensions. -+ auto aTensorTy = op.getA().getType().cast(); -+ auto bTensorTy = op.getB().getType().cast(); -+ auto dTensorTy = op.getD().getType().cast(); -+ auto mmaEnc = dTensorTy.getEncoding().cast(); ++ auto aTensorTy = cast(op.getA().getType()); ++ auto bTensorTy = cast(op.getB().getType()); ++ auto dTensorTy = cast(op.getD().getType()); ++ auto mmaEnc = cast(dTensorTy.getEncoding()); + + auto shapePerCTA = getShapePerCTA(dTensorTy); + auto shapePerCTATile = getShapePerCTATile(mmaEnc); @@ -573,7 +581,7 @@ new file mode 100644 + auto sharedObj = getSharedMemoryObjectFromStruct( + loc, arg, typeConverter->convertType(tensorTy.getElementType()), + rewriter); -+ auto sharedLayout = tensorTy.getEncoding().cast(); ++ auto sharedLayout = cast(tensorTy.getEncoding()); + auto shape = getShapePerCTA(tensorTy); + auto ord = sharedLayout.getOrder(); + int byteSize = aTensorTy.getElementTypeBitWidth() / 8; @@ -671,9 +679,9 @@ new file mode 100644 + SparseDotOp::Adaptor adaptor, + const LLVMTypeConverter *typeConverter, + ConversionPatternRewriter &rewriter) { -+ auto resultTy = op.getResult().getType().cast(); ++ auto resultTy = cast(op.getResult().getType()); + NvidiaMmaEncodingAttr mmaLayout = -+ resultTy.getEncoding().cast(); ++ cast(resultTy.getEncoding()); + + if (mmaLayout.isAmpere()) { + return convertSparseMMA(op, adaptor, typeConverter, rewriter); @@ -687,9 +695,10 @@ new file mode 100644 + "Unsupported SparseDotOp found when converting TritonGPU to LLVM."); +} diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp +index 738f0fe04..867939f65 100644 --- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp -@@ -87,8 +87,8 @@ int64_t getSwizzlingFromLayout(const Sha +@@ -88,8 +88,8 @@ int64_t getSwizzlingFromLayout(const SharedEncodingAttr &layout, return swizzlingByteWidth; } diff --git a/third_party/triton/xla_extensions/sparse_dot_fixes_y24w17.patch b/third_party/triton/xla_extensions/sparse_dot_fixes_y24w17.patch index 9d1ae2e91cae3f..ce009aa688e9bf 100644 --- a/third_party/triton/xla_extensions/sparse_dot_fixes_y24w17.patch +++ b/third_party/triton/xla_extensions/sparse_dot_fixes_y24w17.patch @@ -1,30 +1,8 @@ -diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp ---- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp -+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp -@@ -22,16 +22,16 @@ 
Value convertLayout( - // Calculate tile size as number of mask elements (4xi4). - NvidiaMmaEncodingAttr mmaLayout = - sparseEncoding.getParent().cast(); -+ SmallVector warpsPerCTA = mmaLayout.getWarpsPerCTA(); - SmallVector shapePerCTATile = { -- kTileSize * mmaLayout.getWarpsPerCTA()[0], -- kTileSize / kMetadataElementsPerPackedValue}; -+ kTileSize * warpsPerCTA[0], kTileSize / kMetadataElementsPerPackedValue}; - Value strideM = smemObj.strides[0]; - Value strideK = smemObj.strides[1]; - - // Calculate offset in the tile for the current thread. - Value threadsPerWarp = i32_val(kThreadsPerWarp); - Value warpId = udiv(thread, threadsPerWarp); -- Value warpGroupId = urem(warpId, i32_val(shapePerCTATile[0] / kTileSize)); -+ Value warpGroupId = udiv(warpId, i32_val(warpsPerCTA[1])); - Value laneId = urem(thread, threadsPerWarp); - Value laneGroupId = udiv(laneId, i32_val(kThreadsInGroup)); - Value columnId = urem(laneId, i32_val(shapePerCTATile[1])); diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp +index 0516fc56f..1f27f8a43 100644 --- a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp +++ b/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp -@@ -139,6 +139,7 @@ class BlockedToMMA : public mlir::Rewrit +@@ -142,6 +142,7 @@ class BlockedToMMA : public mlir::RewritePattern { mlir::TypeID::get()); } @@ -32,7 +10,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect // Finds the first different bitwidth in the chain of shape-preserving // unary ops that x depends on. // There are two primary scenarios: -@@ -172,7 +173,6 @@ class BlockedToMMA : public mlir::Rewrit +@@ -175,7 +176,6 @@ class BlockedToMMA : public mlir::RewritePattern { return origBitWidth; } @@ -40,7 +18,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect BlockedToMMA(mlir::MLIRContext *context, int computeCapability) : mlir::RewritePattern(tt::DotOp::getOperationName(), 2, context), computeCapability(computeCapability) {} -@@ -388,18 +388,22 @@ class SparseBlockedToMMA : public mlir:: +@@ -389,18 +389,22 @@ class SparseBlockedToMMA : public mlir::RewritePattern { newRetType, oldAcc); if (versionMajor == 2) { @@ -49,7 +27,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + int kWidth = 32 / minBitwidth; + // convert A operand - auto oldAType = a.getType().cast(); + auto oldAType = cast(a.getType()); - auto newAEncoding = ttg::DotOperandEncodingAttr::get( - ctx, 0, mmaEnc, oldAType.getElementType()); + auto newAEncoding = @@ -59,7 +37,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect a = rewriter.create(a.getLoc(), newAType, a); // convert B operand - auto oldBType = b.getType().cast(); + auto oldBType = cast(b.getType()); - auto newBEncoding = ttg::DotOperandEncodingAttr::get( - ctx, 1, mmaEnc, oldBType.getElementType()); + auto newBEncoding = @@ -67,3 +45,27 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect auto newBType = RankedTensorType::get( oldBType.getShape(), oldBType.getElementType(), newBEncoding); b = rewriter.create(b.getLoc(), newBType, b); +diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp +index 3011cf73d..ea587dced 100644 +--- 
a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp ++++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp +@@ -22,16 +22,16 @@ Value convertLayout( + // Calculate tile size as number of mask elements (4xi4). + NvidiaMmaEncodingAttr mmaLayout = + cast(sparseEncoding.getParent()); ++ SmallVector warpsPerCTA = mmaLayout.getWarpsPerCTA(); + SmallVector shapePerCTATile = { +- kTileSize * mmaLayout.getWarpsPerCTA()[0], +- kTileSize / kMetadataElementsPerPackedValue}; ++ kTileSize * warpsPerCTA[0], kTileSize / kMetadataElementsPerPackedValue}; + Value strideM = smemObj.strides[0]; + Value strideK = smemObj.strides[1]; + + // Calculate offset in the tile for the current thread. + Value threadsPerWarp = i32_val(kThreadsPerWarp); + Value warpId = udiv(thread, threadsPerWarp); +- Value warpGroupId = urem(warpId, i32_val(shapePerCTATile[0] / kTileSize)); ++ Value warpGroupId = udiv(warpId, i32_val(warpsPerCTA[1])); + Value laneId = urem(thread, threadsPerWarp); + Value laneGroupId = udiv(laneId, i32_val(kThreadsInGroup)); + Value columnId = urem(laneId, i32_val(shapePerCTATile[1])); diff --git a/third_party/triton/xla_extensions/sparse_dot_fixes_y24w19.patch b/third_party/triton/xla_extensions/sparse_dot_fixes_y24w19.patch index 8ac91d153690fd..775ed317d1f9b9 100644 --- a/third_party/triton/xla_extensions/sparse_dot_fixes_y24w19.patch +++ b/third_party/triton/xla_extensions/sparse_dot_fixes_y24w19.patch @@ -11,3 +11,21 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect // get MMA encoding for the given number of warps auto retShapePerCTA = ttg::getShapePerCTA(oldRetType); +diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp +--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp ++++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp +@@ -31,7 +31,13 @@ Value convertLayout( + // Calculate offset in the tile for the current thread. 
+ Value threadsPerWarp = i32_val(kThreadsPerWarp); + Value warpId = udiv(thread, threadsPerWarp); +- Value warpGroupId = udiv(warpId, i32_val(warpsPerCTA[1])); ++ Value warpGroupId; ++ if (mmaLayout.isHopper()) { ++ warpGroupId = urem(warpId, i32_val(warpsPerCTA[0])); ++ } else { ++ assert(mmaLayout.isAmpere()); ++ warpGroupId = udiv(warpId, i32_val(warpsPerCTA[1])); ++ } + Value laneId = urem(thread, threadsPerWarp); + Value laneGroupId = udiv(laneId, i32_val(kThreadsInGroup)); + Value columnId = urem(laneId, i32_val(shapePerCTATile[1])); diff --git a/third_party/triton/xla_extensions/sparse_dot_nvgpu.patch b/third_party/triton/xla_extensions/sparse_dot_nvgpu.patch index ced13cff33fd16..791618363b2f34 100644 --- a/third_party/triton/xla_extensions/sparse_dot_nvgpu.patch +++ b/third_party/triton/xla_extensions/sparse_dot_nvgpu.patch @@ -1,7 +1,8 @@ -diff --git a/include/triton/Dialect/NVGPU/IR/NVGPUOps.td b/include/triton/Dialect/NVGPU/IR/NVGPUOps.td ---- a/include/triton/Dialect/NVGPU/IR/NVGPUOps.td -+++ b/include/triton/Dialect/NVGPU/IR/NVGPUOps.td -@@ -87,6 +87,15 @@ def NVGPU_WGMMAOp : NVGPU_Op<"wgmma", [] +diff --git a/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td b/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td +index ca9d18873..d39bc6ec4 100644 +--- a/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td ++++ b/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td +@@ -87,6 +87,15 @@ def NVGPU_WGMMAOp : NVGPU_Op<"wgmma", []> { let assemblyFormat = "$opA `,` $opB (`,` $opC^)? attr-dict `:` functional-type(operands, $res)"; } @@ -18,9 +19,10 @@ diff --git a/include/triton/Dialect/NVGPU/IR/NVGPUOps.td b/include/triton/Dialec let arguments = (ins LLVM_AnyPointer:$addr, I32:$ctaId, I32Attr:$bitwidth, I32Attr:$vec); let builders = [ diff --git a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp b/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp +index e19216520..aacbfb569 100644 --- a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp +++ b/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp -@@ -688,6 +688,84 @@ public: +@@ -668,6 +668,84 @@ public: } }; @@ -31,7 +33,7 @@ diff --git a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp b/third_part + using Base::Base; + + std::vector getOutputConstraints(ttn::SparseWGMMAOp op) const { -+ auto outputStructType = op.getType().cast(); ++ auto outputStructType = cast(op.getType()); + uint32_t numOutputRegs = outputStructType.getBody().size(); + std::string output = + outputStructType.getBody().front().isF32() ? "=f" : "=r"; @@ -71,7 +73,7 @@ diff --git a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp b/third_part + + // Output and operand C + uint32_t numCRegs = -+ op.getType().cast().getBody().size(); ++ cast(op.getType()).getBody().size(); + args += "{"; + for (uint32_t i = 0; i < numCRegs; ++i) { + args += "$" + std::to_string(asmOpIdx++) + (i == numCRegs - 1 ? 
"" : ","); @@ -105,13 +107,17 @@ diff --git a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp b/third_part class ConvertNVGPUToLLVM : public ConvertNVGPUToLLVMBase { public: -@@ -711,7 +789,8 @@ public: +@@ -688,10 +766,9 @@ public: + patterns.add>( + context, Cluster_Cta_Id_Op, Constraints({"=r"}), Constraints()); - patterns.add(context); -+ WGMMAWaitGroupOpPattern, StoreDSmemOpPattern, -+ SparseWGMMAOpPattern>(context); +- patterns +- .add( +- context); ++ patterns.add(context); if (applyPatternsAndFoldGreedily(mod, std::move(patterns)).failed()) signalPassFailure(); diff --git a/third_party/triton/xla_extensions/sparse_dot_passes.patch b/third_party/triton/xla_extensions/sparse_dot_passes.patch index 74662cf3c90dc9..9136cb84b24254 100644 --- a/third_party/triton/xla_extensions/sparse_dot_passes.patch +++ b/third_party/triton/xla_extensions/sparse_dot_passes.patch @@ -1,7 +1,8 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp +index 4aa2712ec..16a6253d7 100644 --- a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp +++ b/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp -@@ -277,6 +277,89 @@ struct TritonDotPattern : public OpConve +@@ -279,6 +279,89 @@ struct TritonDotPattern : public OpConversionPattern { } }; @@ -12,7 +13,7 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co + LogicalResult matchAndRewrite( + triton::gpu::SparseDotOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { -+ RankedTensorType origType = op.getType().cast(); ++ RankedTensorType origType = cast(op.getType()); + auto origShape = origType.getShape(); + auto typeConverter = getTypeConverter(); + int numWarps = typeConverter->getNumWarps(); @@ -40,8 +41,8 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co + RankedTensorType::get(origShape, origType.getElementType(), dEncoding); + + // a & b must be of smem layout -+ auto aType = adaptor.getA().getType().cast(); -+ auto bType = adaptor.getB().getType().cast(); ++ auto aType = cast(adaptor.getA().getType()); ++ auto bType = cast(adaptor.getB().getType()); + Type aEltType = aType.getElementType(); + Type bEltType = bType.getElementType(); + Attribute aEncoding = aType.getEncoding(); @@ -51,14 +52,14 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co + Value a = adaptor.getA(); + Value b = adaptor.getB(); + Value c = adaptor.getC(); -+ if (!aEncoding.isa()) { ++ if (!isa(aEncoding)) { + Attribute encoding = triton::gpu::DotOperandEncodingAttr::get( + getContext(), 0, dEncoding, aEltType); + auto dstType = + RankedTensorType::get(aType.getShape(), aEltType, encoding); + a = rewriter.create(a.getLoc(), dstType, a); + } -+ if (!bEncoding.isa()) { ++ if (!isa(bEncoding)) { + Attribute encoding = triton::gpu::DotOperandEncodingAttr::get( + getContext(), 1, dEncoding, bEltType); + auto dstType = @@ -68,11 +69,11 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co + c = rewriter.create(c.getLoc(), retType, c); + + // aMeta must be of smem layout -+ auto aMetaType = adaptor.getAMeta().getType().cast(); ++ auto aMetaType = cast(adaptor.getAMeta().getType()); + Attribute aMetaEncoding = aMetaType.getEncoding(); + if (!aMetaEncoding) return failure(); + Value aMeta = adaptor.getAMeta(); -+ if (!aMetaEncoding.isa()) { ++ if (!isa(aMetaEncoding)) { + Attribute encoding = + 
triton::gpu::SparseDotMetaEncodingAttr::get(getContext(), dEncoding); + auto dstType = RankedTensorType::get( @@ -91,17 +92,17 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co struct TritonCatPattern : public OpConversionPattern { using OpConversionPattern::OpConversionPattern; -@@ -550,6 +633,7 @@ void populateTritonPatterns(TritonGPUTyp - GenericOpPattern, GenericOpPattern, +@@ -553,6 +636,7 @@ void populateTritonPatterns(TritonGPUTypeConverter &typeConverter, + GenericOpPattern, GenericOpPattern, TritonFuncOpPattern>(typeConverter, context); + patterns.insert(typeConverter, context); } // -@@ -788,6 +872,12 @@ public: - IntegerAttr::get( - i32_ty, llvm::APInt(32, computeCapability.getValue()))); +@@ -794,6 +878,12 @@ public: + mod->setAttr(AttrTargetName, + StringAttr::get(context, this->target.getValue())); + // Only transform sparse dot op with undefined layout. + target.addDynamicallyLegalOp( @@ -113,9 +114,10 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co return signalPassFailure(); diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp +index 098ee85e4..0516fc56f 100644 --- a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp +++ b/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp -@@ -42,8 +42,9 @@ static int getMMAVersionSafe(int compute +@@ -44,8 +44,9 @@ static int getMMAVersionSafe(int computeCapability, tt::DotOp op) { return 0; } @@ -126,7 +128,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect auto rank = shape.size(); // Early exit for batched matmul if (rank == 3) -@@ -56,14 +57,14 @@ warpsPerTileV2(tt::DotOp dotOp, const Ar +@@ -58,8 +59,8 @@ warpsPerTileV2(tt::DotOp dotOp, const ArrayRef shape, int numWarps) { auto slices = multiRootGetSlice(dotOp, {filter}, {filter}); bool hasChainedDot = false; for (Operation *op : slices) { @@ -137,14 +139,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect auto resTy = chainedDot.getResult().getType(); if (resTy.getRank() != rank) { continue; - } - if (auto mmaEncoding = -- resTy.getEncoding().dyn_cast()) { -+ resTy.getEncoding().template dyn_cast()) { - return ttg::getWarpsPerCTA(mmaEncoding); - } - hasChainedDot = true; -@@ -101,12 +102,13 @@ warpsPerTileV2(tt::DotOp dotOp, const Ar +@@ -103,12 +104,13 @@ warpsPerTileV2(tt::DotOp dotOp, const ArrayRef shape, int numWarps) { return ret; } @@ -162,7 +157,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect slices.end()) return {(unsigned)numWarps, 1}; -@@ -175,9 +177,10 @@ public: +@@ -178,9 +180,10 @@ public: : mlir::RewritePattern(tt::DotOp::getOperationName(), 2, context), computeCapability(computeCapability) {} @@ -176,7 +171,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect switch (version) { case 2: return warpsPerTileV2(dotOp, shape, numWarps); -@@ -337,6 +340,98 @@ public: +@@ -335,6 +338,98 @@ public: return success(); } }; @@ -201,7 +196,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + // Check data-types and SM compatibility + RankedTensorType oldRetType = dotOp.getType(); + if (!oldRetType.getEncoding() || -+ oldRetType.getEncoding().isa()) ++ isa(oldRetType.getEncoding())) + return failure(); + + assert(computeCapability >= 80 && @@ -216,7 +211,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + + auto instrShape = + 
mmaVersionToInstrShape(versionMajor, retShapePerCTA, -+ a.getType().cast(), numWarps); ++ cast(a.getType()), numWarps); + auto warpsPerTile = BlockedToMMA::getWarpsPerTile( + dotOp, retShapePerCTA, versionMajor, numWarps, instrShape); + ttg::NvidiaMmaEncodingAttr mmaEnc = @@ -232,7 +227,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + + if (versionMajor == 2) { + // convert A operand -+ auto oldAType = a.getType().cast(); ++ auto oldAType = cast(a.getType()); + auto newAEncoding = ttg::DotOperandEncodingAttr::get( + ctx, 0, mmaEnc, oldAType.getElementType()); + auto newAType = RankedTensorType::get( @@ -240,7 +235,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + a = rewriter.create(a.getLoc(), newAType, a); + + // convert B operand -+ auto oldBType = b.getType().cast(); ++ auto oldBType = cast(b.getType()); + auto newBEncoding = ttg::DotOperandEncodingAttr::get( + ctx, 1, mmaEnc, oldBType.getElementType()); + auto newBType = RankedTensorType::get( @@ -253,7 +248,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + + // convert metadata + Value meta = dotOp.getAMeta(); -+ auto oldMetaType = meta.getType().cast(); ++ auto oldMetaType = cast(meta.getType()); + auto newMetaType = RankedTensorType::get( + oldMetaType.getShape(), oldMetaType.getElementType(), + SparseDotMetaEncodingAttr::get(ctx, mmaEnc)); @@ -275,7 +270,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect } // namespace static Value promoteOperand(OpBuilder &builder, Location loc, Value operand, -@@ -397,6 +491,7 @@ public: +@@ -394,6 +489,7 @@ public: mlir::RewritePatternSet patterns(context); patterns.add<::BlockedToMMA>(context, computeCapability); @@ -284,33 +279,31 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect signalPassFailure(); } diff --git a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp b/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp +index 97ca6a840..f0ef124ff 100644 --- a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp +++ b/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp -@@ -47,6 +47,10 @@ struct PipelinedOpInfo { - bool loadIsMMAV3 = false; +@@ -188,6 +188,10 @@ public: + } }; -+bool isDotOp(Operation* op) { ++static bool isDotOp(Operation* op) { + return isa(op); +} + - } // namespace - static bool isMMAv3Dot(Operation *op) { -@@ -165,22 +169,28 @@ getSharedEncIfAllUsersAreDotEnc(Value val) { + auto dot = dyn_cast(op); + if (!dot) +@@ -399,19 +403,28 @@ getSharedEncIfAllUsersAreDotEnc(Value val) { } else { if (!isa(user)) return std::nullopt; -- auto dotOpEnc = user->getResult(0) -- .getType() -- .cast() -- .getEncoding() -- .dyn_cast(); +- auto dotOpEnc = dyn_cast( +- cast(user->getResult(0).getType()).getEncoding()); - if (!dotOpEnc) + auto enc = -+ user->getResult(0).getType().cast().getEncoding(); ++ cast(user->getResult(0).getType()).getEncoding(); + if (isa(enc)) { -+ auto srcTy = val.getType().cast(); ++ auto srcTy = cast(val.getType()); + auto CTALayout = ttg::getCTALayout(srcTy.getEncoding()); + auto order = ttg::getOrder(srcTy.getEncoding()); + unsigned bitWidth = srcTy.getElementType().getIntOrFloatBitWidth(); @@ -321,14 +314,14 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp b + srcTy.getElementType().getIntOrFloatBitWidth(), + /*needTrans=*/false); + } else if (isa(enc)) { -+ auto srcTy = val.getType().cast(); 
++ auto srcTy = cast(val.getType()); + tempAttr = ttg::SharedEncodingAttr::get( + val.getContext(), /*vec=*/1, /*perPhase=*/1, /*maxPhase=*/1, + ttg::getOrder(srcTy.getEncoding()), + ttg::getCTALayout(srcTy.getEncoding())); + } else { return std::nullopt; -- auto srcTy = val.getType().cast(); +- auto srcTy = cast(val.getType()); - auto CTALayout = ttg::getCTALayout(srcTy.getEncoding()); - auto order = ttg::getOrder(srcTy.getEncoding()); - unsigned bitWidth = srcTy.getElementType().getIntOrFloatBitWidth(); @@ -341,71 +334,63 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp b } // Check that the shared encodings needed by the users are compatible. if (!tempAttr || (attr != nullptr && attr != tempAttr)) -@@ -313,7 +323,7 @@ loadOpsToDistanceAndUse(scf::ForOp forOp) { +@@ -518,7 +531,7 @@ loadOpsToIndirectionLevelAndUse(scf::ForOp forOp) { }; for (Operation &op : forOp.getBody()->without_terminator()) { - if (!isa(op)) + if (!isDotOp(&op)) continue; + seen.clear(); dfs(&op, 0, &op); - } -@@ -391,7 +401,8 @@ collectOpsToPipeline(scf::ForOp forOp, - // loads. - for (auto &[loadOp, distAndUse] : loadOpToDistAndUse) { - PipelinedOpInfo loadInfo; -- if (auto dot = dyn_cast(distAndUse.second)) { -+ if (isDotOp(distAndUse.second)) { -+ auto dot = dyn_cast(distAndUse.second); - if (loadIsMMAv3(loadOp)) { +@@ -595,7 +608,8 @@ assignMemoryLayouts(llvm::SmallVector> + continue; + } + +- if (auto dot = dyn_cast(use)) { ++ if (isDotOp(use)) { ++ auto dot = dyn_cast(use); + loadInfo.usedByDot = true; + if (loadIsMMAv3(op)) { loadInfo.loadIsMMAV3 = true; - loadInfo.sharedEncoding = -@@ -410,7 +421,7 @@ collectOpsToPipeline(scf::ForOp forOp, +@@ -614,7 +628,7 @@ assignMemoryLayouts(llvm::SmallVector> // The codegen bug is caught by an assertion, so if you think you've // fixed it, feel free to delete this code and see if the assert still // fails. :) - if (!loadInfo.sharedEncoding) { + if (dot && !loadInfo.sharedEncoding) { - if (auto dotEnc = dot.getResult() - .getType() - .getEncoding() -@@ -788,7 +799,7 @@ bool mlir::triton::preProcessLoopAndGetSchedule( - int useStage = opToInfo[info.use].stage; - int numBuffers = useStage - defStage; - -- if (hasMMAV3 && isa(info.use)) { -+ if (hasMMAV3 && isDotOp(info.use)) { - // For MMAv3, we need an extra buffer as this is assumed in the wgmma - // pipelining post-processing. 
- numBuffers++; + if (auto dotEnc = dyn_cast( + dot.getResult().getType().getEncoding())) { + auto loadTy = cast(op->getResultTypes()[0]); diff --git a/lib/Dialect/TritonGPU/Transforms/ReduceDataDuplication.cpp b/lib/Dialect/TritonGPU/Transforms/ReduceDataDuplication.cpp +index 2211df31b..ee5ff44d8 100644 --- a/lib/Dialect/TritonGPU/Transforms/ReduceDataDuplication.cpp +++ b/lib/Dialect/TritonGPU/Transforms/ReduceDataDuplication.cpp -@@ -36,6 +36,10 @@ public: +@@ -37,6 +37,10 @@ public: auto srcEncoding = srcType.getEncoding(); - if (srcEncoding.isa()) + if (isa(srcEncoding)) return; -+ if (dstType.getEncoding().isa()) { ++ if (isa(dstType.getEncoding())) { + replaceSparseMetaEncoding(cvtOp); + return; + } auto dstDotOp = - dstType.getEncoding().dyn_cast(); + dyn_cast(dstType.getEncoding()); if (!dstDotOp) -@@ -74,6 +78,27 @@ public: +@@ -83,6 +87,27 @@ public: cvtOp.erase(); }); } + + private: + void replaceSparseMetaEncoding(triton::gpu::ConvertLayoutOp cvtOp) { -+ auto srcType = cvtOp.getOperand().getType().cast(); ++ auto srcType = cast(cvtOp.getOperand().getType()); + auto srcEncoding = srcType.getEncoding(); + auto sharedLayout = triton::gpu::SharedEncodingAttr::get( + cvtOp.getContext(), 8, 1, 1, triton::gpu::getOrder(srcEncoding), + triton::gpu::getCTALayout(srcEncoding)); + -+ auto dstType = cvtOp.getType().cast(); ++ auto dstType = cast(cvtOp.getType()); + auto tmpType = triton::MemDescType::get( + dstType.getShape(), dstType.getElementType(), sharedLayout); + @@ -421,6 +406,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/ReduceDataDuplication.cpp b/lib/Di std::unique_ptr mlir::triton::gpu::createReduceDataDuplicationPass() { diff --git a/lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp b/lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp +index f456d36a6..a1dac2b72 100644 --- a/lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp +++ b/lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp @@ -45,7 +45,7 @@ public: @@ -432,7 +418,7 @@ diff --git a/lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp b/lib/Dia return WalkResult::advance(); OpBuilder builder(op); auto a = op->getOperand(0); -@@ -83,7 +83,7 @@ private: +@@ -80,7 +80,7 @@ private: static DenseSet> trace; auto op = operand.getDefiningOp(); // avoid redundant insertion diff --git a/third_party/xla/.kokoro/linux/build.sh b/third_party/xla/.kokoro/linux/build.sh index 87bf19cd7ad71f..c32a54520a5a05 100644 --- a/third_party/xla/.kokoro/linux/build.sh +++ b/third_party/xla/.kokoro/linux/build.sh @@ -48,13 +48,13 @@ docker run --name xla -w /tf/xla -itd --rm \ "$DOCKER_IMAGE" \ bash -TAGS_FILTER="-no_oss,-oss_excluded,-oss_serial" +TAGS_FILTER="-no_oss" ADDITIONAL_FLAGS="" RBE_FLAGS="" -TARGET_FILTERS="-@local_tsl//tsl/platform:subprocess_test -@local_tsl//tsl/platform/cloud:google_auth_provider_test -@local_tsl//tsl/platform/cloud:oauth_client_test" +TARGET_FILTERS="" if is_linux_gpu_job ; then - TAGS_FILTER="$TAGS_FILTER,gpu,requires-gpu-nvidia,-no_gpu" + TAGS_FILTER="$TAGS_FILTER,requires-gpu-nvidia" # We are currently running XLA presubmits on machines with NVIDIA T4 GPUs, # which have a compute compatibility of 7.5. 
Se we filter out all the tests @@ -62,34 +62,31 @@ if is_linux_gpu_job ; then UNSUPPORTED_GPU_TAGS="$(echo -requires-gpu-sm{80,86,89,90}{,-only})" TAGS_FILTER="${TAGS_FILTER},${UNSUPPORTED_GPU_TAGS// /,}" - ADDITIONAL_FLAGS="$ADDITIONAL_FLAGS --nobuild_tests_only --run_under=//tools/ci_build/gpu_build:parallel_gpu_execute" + ADDITIONAL_FLAGS="$ADDITIONAL_FLAGS --run_under=//tools/ci_build/gpu_build:parallel_gpu_execute" RBE_FLAGS="--config=rbe_linux_cuda_nvcc --jobs=150" echo "***NOTE: nvidia-smi lists the highest CUDA version the driver supports, which may be different than the version of CUDA actually used!!***" nvidia-smi else TAGS_FILTER="$TAGS_FILTER,-gpu,-requires-gpu-nvidia" ADDITIONAL_FLAGS="$ADDITIONAL_FLAGS --config=nonccl" + TARGET_FILTERS="$TARGET_FILTERS -//xla/service/gpu/..." if is_linux_cpu_arm64_job ; then TAGS_FILTER="$TAGS_FILTER,-no_aarch64" - ADDITIONAL_FLAGS="$ADDITIONAL_FLAGS --action_env PYTHON_BIN_PATH=/usr/bin/python3.11 --python_path=/usr/bin/python3.11" - # Some cross-compile tests are not working for XLA Linux Aarch64. - # TODO(ddunleavy): Revisit these when hermetic python is available. - TARGET_FILTERS="$TARGET_FILTERS -//xla/python_api:xla_shape_test -//xla/python_api:xla_literal_test -//xla/service:xla_aot_compile_stablehlo_cpu_test -//xla/tests:local_client_aot_test" RBE_FLAGS="--config=rbe_cross_compile_linux_arm64_xla --jobs=150" else RBE_FLAGS="--config=rbe_linux_cpu --jobs=150" - ADDITIONAL_FLAGS="$ADDITIONAL_FLAGS --nobuild_tests_only" fi fi # Build & test XLA docker exec xla bazel \ test \ - --build_tag_filters=$TAGS_FILTER \ + --build_tag_filters=$TAGS_FILTER \ --test_tag_filters=$TAGS_FILTER \ --test_output=errors \ --keep_going \ + --nobuild_tests_only \ --features=layering_check \ --profile=/tf/pkg/profile.json.gz \ --flaky_test_attempts=3 \ diff --git a/third_party/xla/WORKSPACE b/third_party/xla/WORKSPACE index 7ba74d6276c2e4..9d046e22949091 100644 --- a/third_party/xla/WORKSPACE +++ b/third_party/xla/WORKSPACE @@ -7,6 +7,32 @@ workspace(name = "xla") # restriction that load() statements need to be at the top of .bzl files. # E.g. we can not retrieve a new repository with http_archive and then load() # a macro from that repository in the same file. 
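
The WORKSPACE comment above is why the hermetic-Python initialization added just below is spread over several alternating `load()` / call pairs instead of a single macro: a `load()` in a `.bzl` file is resolved before anything else in that file runs, so a repository fetched with `http_archive` in that file cannot also be `load()`ed from it. A hypothetical Starlark sketch of the failure mode (repository, file, and macro names are made up for illustration):

```python
# deps.bzl (hypothetical) -- this does NOT work. The second load() below is
# resolved when this file is first loaded, before deps() has had a chance to
# run, so "@some_rules" does not exist yet and Bazel reports an unknown
# repository.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@some_rules//:defs.bzl", "some_macro")  # error: repository not defined

def deps():
    http_archive(
        name = "some_rules",
        urls = ["https://example.com/some_rules.tar.gz"],  # illustrative URL
    )
    some_macro()

# Working pattern (what the block below does): one statement defines the
# repositories, and the macros are load()ed from WORKSPACE in a *later*
# statement, after the defining call has executed -- hence the alternating
# python_init_*() calls and load() lines.
```
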
+ +# Initialize hermetic Python +load("//third_party/py:python_init_rules.bzl", "python_init_rules") + +python_init_rules() + +load("//third_party/py:python_init_repositories.bzl", "python_init_repositories") + +python_init_repositories( + requirements = { + "3.11": "//:requirements_lock_3_11.txt", + }, +) + +load("//third_party/py:python_init_toolchains.bzl", "python_init_toolchains") + +python_init_toolchains() + +load("//third_party/py:python_init_pip.bzl", "python_init_pip") + +python_init_pip() + +load("@pypi//:requirements.bzl", "install_deps") + +install_deps() + load(":workspace4.bzl", "xla_workspace4") xla_workspace4() diff --git a/third_party/xla/build_tools/configure/configure.py b/third_party/xla/build_tools/configure/configure.py index 663e4b8724280d..2178a8b71dbc72 100755 --- a/third_party/xla/build_tools/configure/configure.py +++ b/third_party/xla/build_tools/configure/configure.py @@ -183,6 +183,7 @@ class Backend(ArgparseableEnum): CPU = enum.auto() CUDA = enum.auto() ROCM = enum.auto() + SYCL = enum.auto() class HostCompiler(ArgparseableEnum): @@ -402,6 +403,8 @@ def to_bazelrc_lines( rc.append("build --config nonccl") elif self.backend == Backend.ROCM: pass + elif self.backend == Backend.SYCL: + rc.append("build --config sycl") # Lines that are added for every backend if dpav.ld_library_path: diff --git a/third_party/xla/build_tools/sycl/build.sh b/third_party/xla/build_tools/sycl/build.sh new file mode 100644 index 00000000000000..5ed6beb86c8a3d --- /dev/null +++ b/third_party/xla/build_tools/sycl/build.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +# Copyright 2024 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# +# A script to build XLA sycl target. +# +# Required input: +# workspace: the local to do this buidl + +if [ $# -lt 1 ];then + echo "Error: workspace not set." + exit 1 +fi + +workspace=$1 + +if [ -e ${workspace} ];then + time_stamp=$(date +%s%N) + echo "Warning: ${workspace} exist." + workspace=$workspace/$time_stamp + echo "Will use $workspace as new workspace" +fi + +mkdir -p $workspace + +xla_path=$workspace/xla +cd $workspace +git clone -b yang/ci https://github.com/Intel-tensorflow/xla xla +bash $xla_path/build_tools/sycl/install_bazel.sh $workspace +bash $xla_path/build_tools/sycl/install_oneapi.sh $workspace install +bash $xla_path/build_tools/sycl/build_xla.sh $workspace +bash $xla_path/build_tools/sycl/clean.sh $workspace + diff --git a/third_party/xla/build_tools/sycl/build_xla.sh b/third_party/xla/build_tools/sycl/build_xla.sh new file mode 100644 index 00000000000000..38e1c00e3dca21 --- /dev/null +++ b/third_party/xla/build_tools/sycl/build_xla.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# Copyright 2024 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +workspace=$1 +cd $workspace/xla + +if [ -z ${SYCL_TOOLKIT_PATH+x} ]; +then + export SYCL_TOOLKIT_PATH=$workspace/oneapi/compiler/2024.1/ +fi +bazel_bin=$(ls $workspace/bazel/) +./configure.py --backend=SYCL --host_compiler=GCC +$workspace/bazel/$bazel_bin build --config=verbose_logs -s --verbose_failures --nocheck_visibility //xla/tools:run_hlo_module diff --git a/third_party/xla/build_tools/sycl/clean.sh b/third_party/xla/build_tools/sycl/clean.sh new file mode 100644 index 00000000000000..b72aa75a84a622 --- /dev/null +++ b/third_party/xla/build_tools/sycl/clean.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Copyright 2024 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +workspace=$1 +SCRIPT_PATH="`dirname \"$0\"`" +bash $SCRIPT_PATH/install_oneapi.sh $workspace remove +rm -rf $workspace diff --git a/third_party/xla/build_tools/sycl/install_bazel.sh b/third_party/xla/build_tools/sycl/install_bazel.sh new file mode 100644 index 00000000000000..773dcee27cf2fa --- /dev/null +++ b/third_party/xla/build_tools/sycl/install_bazel.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Copyright 2024 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +workspace=$1 +cd $workspace/xla +bazel_version=$(head -1 .bazelversion) +bazel_base_url="https://github.com/bazelbuild/bazel/releases/download" +bazel_url=$bazel_base_url/$bazel_version/"bazel-$bazel_version-linux-x86_64" +echo $bazel_url +mkdir -p $workspace/bazel +cd $workspace/bazel +wget $bazel_url +bazel_bin=$(ls) +chmod +x $bazel_bin diff --git a/third_party/xla/build_tools/sycl/install_oneapi.sh b/third_party/xla/build_tools/sycl/install_oneapi.sh new file mode 100644 index 00000000000000..67ffe93f107cbb --- /dev/null +++ b/third_party/xla/build_tools/sycl/install_oneapi.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Copyright 2024 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +if [ -z ${SYCL_TOOLKIT_PATH+x} ]; +then +workspace=$1 +action=$2 +echo "Install Intel OneAPI in $workspace/oneapi" +cd $workspace +mkdir -p oneapi +if ! [ -f $workspace/l_BaseKit_p_2024.1.0.596.sh ]; then + echo "Download oneAPI package" + wget https://registrationcenter-download.intel.com/akdlm/IRC_NAS/fdc7a2bc-b7a8-47eb-8876-de6201297144/l_BaseKit_p_2024.1.0.596.sh +fi +bash l_BaseKit_p_2024.1.0.596.sh -a -s --eula accept --action $action --install-dir $workspace/oneapi --log-dir $workspace/oneapi/log --download-cache $workspace/oneapi/cache --components=intel.oneapi.lin.dpcpp-cpp-compiler:intel.oneapi.lin.mkl.devel +else + echo "SYCL_TOOLKIT_PATH set to $SYCL_TOOLKIT_PATH", skip install/remove oneAPI; +fi diff --git a/third_party/xla/docs/custom_call.md b/third_party/xla/docs/custom_call.md index 84633d697daa20..ef56830e08c764 100644 --- a/third_party/xla/docs/custom_call.md +++ b/third_party/xla/docs/custom_call.md @@ -1,18 +1,18 @@ # XLA custom calls -This document describes how to write and use XLA custom calls. Custom calls let -you invoke code written in a programming language like C++ or CUDA from an XLA -program. - -> **Caution:** Custom calls are a low-level power-user feature. It is easy to -> break your program in difficult-to-debug (and even difficult-to-notice) ways -> using custom calls. You shouldn't use custom calls unless you're prepared to -> debug XLA yourself when something goes wrong, and you should expect relatively -> less assistance from XLA developers if you run into trouble. - -> **Caution:** The custom-call API/ABI is not currently stable. We don't intend -> to change it capriciously, but it may change. Some possible future changes are -> described below. +This document describes how to write and use XLA custom calls using XLA FFI +library. Custom call is a mechanism to describe an external "operation" in the +HLO module to the XLA compiler (at compile time), and XLA FFI is a mechanism to +register implementation of such operations with XLA (at run time). 
FFI stands +for "foreign function interface" and it is a set of C APIs that define a binary +interface (ABI) for XLA to call into external code written in other programming +languages. XLA provides header-only bindings for XLA FFI written in C++, which +hides all the low level details of underlying C APIs from the end user. + +> **Caution:** The custom-call API/ABI uses PJRT-style versioning (major, minor), +> however at this point it is still experimental and can be broken at any time. +> Once API/ABI is finalized we intend to provide stability guarantees +> similar to PJRT. > **Caution** The HLO-visible names of functions registered with the custom-call > macros API do not respect C++ namespaces. As a result, accidental collisions @@ -42,30 +42,54 @@ void do_it() { xla::Parameter(&b, 1, xla::ShapeUtil::MakeShape(xla::F32, {2048}), "p1"); xla::XlaOp custom_call = xla::CustomCall(&b, "do_custom_call", /*operands=*/{param0, param1}, - /*shape=*/xla::ShapeUtil::MakeShape(xla::F32, {2048})); + /*shape=*/xla::ShapeUtil::MakeShape(xla::F32, {2048}), + /*opaque=*/"", /*has_side_effect=*/false, + /*output_operand_aliasing=*/{}, /*literal=*/nullptr, + /*schedule=*/CustomCallSchedule::SCHEDULE_NONE, + /*api_version=*/CustomCallApiVersion::API_VERSION_TYPED_FFI); } -void do_custom_call(void* out, const void** in) { - float* out_buf = reinterpret_cast(out); - const float* in0 = reinterpret_cast(in[0]); - const float* in1 = reinterpret_cast(in[1]); - for (int i = 0; i < 2048; ++i) { - out_buf[i] = in0[i % 128] + in1[i]; +// Constrain custom call arguments to rank-1 buffers of F32 data type. +using BufferF32 = xla::ffi::BufferR1; + +// Implement a custom call as a C+ function. Note that we can use `Buffer` type +// defined by XLA FFI that gives us access to buffer data type and shape. +xla::ffi::Error do_custom_call(BufferF32 in0, BufferF32 in1, + xla::ffi::Result out) { + size_t d0 = in0.dimensions[0]; + size_t d1 = in1.dimensions[0]; + + // Check that dimensions are compatible. + assert(out->dimensions[0] == d1 && "unexpected dimensions"); + + for (size_t i = 0; i < d1; ++i) { + out->data[i] = in0.data[i % d0] + in1.data[i]; } } -XLA_REGISTER_CUSTOM_CALL_TARGET(do_custom_call, "Host"); -``` -Notice that the function `do_custom_call` needs to know the dimensions of the -buffers it operates over. In this example we hardcode the sizes `128` and -`2048`. If you don't want to do this, you can pass the dimensions in as -parameters to the call. +// Explicitly define an XLA FFI handler signature and bind it to the +// `do_custom_call` implementation. XLA FFI handler can automatically infer +// type signature from the custom call function, but it relies on magical +// template metaprogramming an explicit binding provides and extra level of +// type checking and clearly states custom call author intentions. +XLA_FFI_DEFINE_HANDLER(handler, do_custom_call, + ffi::Ffi::Bind() + .Arg() + .Arg() + .Ret()); + +// Registers `handler` with and XLA FFI on a "Host" platform. +XLA_FFI_REGISTER_HANDLER(xla::ffi::GetXlaFfiApi(), "do_custom_call", + "Host", handler); +``` ## Create a custom call on GPU -The GPU custom call framework is somewhat different than that on the CPU. Here -is a CUDA example that does the same computation (`A[i] = B[i % 128] + C[i]`) as -the CPU code above. +The GPU custom call registration with XLA FFI is almost identical, the only +difference is that for GPU you need to ask for an underlying platform stream +(CUDA or ROCM stream) to be able to launch kernel on device. 
Here is a CUDA +example that does the same computation (`A[i] = B[i % 128] + C[i]`) as the CPU +code above. ```c++ void do_it() { /* same implementation as above */ } @@ -75,18 +99,29 @@ __global__ custom_call_kernel(const float* in0, const float* in1, float* out) { out[idx] = in0[idx % 128] + in1[idx]; } -void do_custom_call(CUstream stream, void** buffers, - const char* opaque, size_t opaque_len) { - const float* in0 = reinterpret_cast(buffers[0]); - const float* in1 = reinterpret_cast(buffers[1]); - float* out = reinterpret_cast(buffers[2]); +void do_custom_call(CUstream stream, BufferF32 in0, BufferF32 in1, + xla::ffi::Result out) { + size_t d0 = in0.dimensions[0]; + size_t d1 = in1.dimensions[0]; + size_t d2 = out->dimensions[0]; + + assert(d0 == 128 && d1 == 2048 && d2 == 2048 && "unexpected dimensions"); const int64_t block_dim = 64; const int64_t grid_dim = 2048 / block_dim; - custom_call_kernel<<>>(in0, in1, out); + custom_call_kernel<<>>( + in0.data, in1.data, out.data); } -XLA_REGISTER_CUSTOM_CALL_TARGET(do_custom_call, "CUDA"); + +XLA_FFI_DEFINE_HANDLER(handler, do_custom_call, + ffi::Ffi::Bind() + .Ctx>() + .Arg() + .Arg() + .Ret()); + +XLA_FFI_REGISTER_HANDLER(xla::ffi::GetXlaFfiApi(), "do_custom_call", + "CUDA", handler); ``` Notice first that the GPU custom call function *is still a function executed on @@ -94,99 +129,10 @@ the CPU*. The `do_custom_call` CPU function is responsible for enqueueing work on the GPU. Here it launches a CUDA kernel, but it could also do something else, like call cuBLAS. -`buffers` is an array of pointers that lives on the host, and each element it -contains points to device (i.e. GPU) memory. The parameters come first, followed -by the output value. This is notably different from the CPU calling convention, -which has two params, `ins` and `out`. The GPU calling convention makes it -possible to handle tuple-shaped inputs/outputs efficiently. - -As in the CPU example, we've hardcoded the input and output buffer sizes into -our custom call. However unlike in the CPU case, passing the buffer sizes in as -operands to the custom call would not work well. Usually we need the buffer -sizes available to us on the CPU (e.g. when launching a kernel, we need to know -the block/grid dimensions to use). But if we were to pass the buffer sizes as -operands to our custom call, their values would live in GPU memory. We'd then -have to do an expensive synchronous device-to-host `memcpy` at the start of our -operation just to read the sizes. - -To let you work around this, we provide the `opaque` parameter. You can set this -to an arbitrary string of bytes when you create the custom call: - -```c++ -std::string opaque = "..."; -xla::CustomCall(&b, "do_custom_call", /*operands=*/{param0, param1}, - /*output_shape=*/xla::ShapeUtil::MakeShape(xla::F32, {2048}), - opaque); -``` - -Because `xla::Shape` has a protocol buffer representation, you could store this -serialized proto inside of `opaque` and deserialize it within your GPU custom -call. Note however that although `xla::ShapeProto` does not change frequently, -it *does* change. Check the Git log to see how it has changed in the past. - -## Signalling an error - -If your custom call encounters an error, you can signal the error to the XLA -runtime (instead of e.g. 
crashing or returning nonsense in the output buffers) -by using the following signature for your function: - -**On CPU:** - -```c++ -#include "xla/service/custom_call_status.h" - -void do_custom_call(void* out, const void** in, XlaCustomCallStatus* status); -``` - -**on GPU:** - -```c++ -#include "xla/service/custom_call_status.h" - -void do_custom_call(CUstream stream, void** buffers, const char* opaque, - size_t opaque_len, xla::XlaCustomCallStatus* status); -``` - -You can signal failure by using `XlaCustomCallStatusSetFailure`, e.g.: - -```c++ -void do_custom_call(void* out, const void** in, XlaCustomCallStatus* status) { - // ... do some work. - - if (bad_condition) { - char* error_message = "An error occurred"; - XlaCustomCallStatusSetFailure(status, error_message, strlen(error_message)); - return; - } - - // ... continue. -} -``` - -You can also use `XlaCustomCallStatusSetSuccess` to indicate success, but the -`XlaCustomCallStatus` is in a success state by default, so ignoring it -completely will also indicate success. - -When using custom call functions with this signature, you must create the -corresponding `custom-call` op with the appropriate API version set, e.g.: - -```c++ -xla::CustomCall(&b, "do_custom_call", /*operands=*/{param0, param1}, - /*output_shape=*/xla::ShapeUtil::MakeShape(F32, {2048}), - opaque, /*has_side_effect=*/false, - /*output_operand_aliasing=*/{}, /*literal=*/nullptr, - /*schedule=*/xla::CustomCallSchedule::SCHEDULE_NONE, - /*api_version=*/API_VERSION_STATUS_RETURNING); -``` - -> **Note:** In the future all clients will be required to migrate their custom -> call functions to the new API version and the old one will be deprecated. For -> custom calls that can't fail, you can simply add the new -> `XlaCustomCallStatus*` parameter and then ignore it. - -On failure, none of the custom call outputs will be used; the XLA runtime will -terminate the computation. It is not possible for an HLO computation to recover -from the error (e.g. by catching and handling it). +Arguments and results also live on the host, and the `data` member of each buffer +contains a pointer to device (i.e. GPU) memory. Buffers passed to the custom call +handler carry the shape of the underlying device buffers, so the custom call can +compute kernel launch parameters from them. ## Passing tuples to custom calls @@ -209,32 +155,12 @@ Shape out_shape = ShapeUtil::MakeTuple({ ShapeUtil::MakeShape(F32, {512}), ShapeUtil::MakeShape(F32, {1024}), }); -xla::CustomCall(&b, "do_custom_call", /*operands=*/{p0}, out_shape); +xla::CustomCall(&b, "do_custom_call", /*operands=*/{p0}, out_shape, ...); ``` On both CPU and GPU, a tuple is represented in memory as an array of pointers. -In C++ pseudocode, parameter 0 above is laid out as follows. - -```c++ -// In-memory layout of parameter 0 from custom call above. True on both CPU -// and GPU. -float* subbuf0 = new float[32]; -float* subbuf1 = new float[64]; -float* subbuf2 = new float[128] -float* subbuf3 = new float[256]; - -void* subtuple = new void*[2]; -(*subtuple)[0] = subbuf1; -(*subtuple)[1] = subbuf2; - -void* p0 = new void*[3]; -(*p0)[0] = subbuf0; -(*p0)[1] = subtuple; -(*p0)[2] = subbuf3; -``` - -Although the in-memory representation of tuples is the same in CPU and GPU, they -are handled differently in the CPU and GPU custom-call calling conventions. +When XLA invokes a custom call with tuple arguments or results, it flattens them +and passes them as regular buffer arguments or results. ### Tuple outputs as temp buffers @@ -256,29 +182,3 @@ been written to.
That's exactly what you want from a temp buffer. In the example above, suppose we wanted to use the `F32[1024]` as a temp buffer. Then we'd write the HLO just as above, and we'd simply never read tuple index 1 of the custom call's output. - -### Tuples in CPU custom calls - -In CPU code, we have a function `do_custom_call(const void** ins, void* out)`. -`ins` is an array with just one element, which points to `param0`. The -subbuffers of `param0` are accessible by dereferencing that pointer, and the -subbuffers of `output_tuple` are accessible by dereferencing `out`. - -### Tuples in GPU custom calls - -In GPU code, we have a function `do_custom_call(..., void** buffers, ...)`. In -this case `buffers` is a host array of *six* device pointers, one for each leaf -buffer in the input/output. To generate the flat list, we iterate over the -parameters and output, and for each we do a preorder traversal of its shape. -Concretely: - -```c++ -// Layout of `buffers` parameter to GPU custom call function for custom-call -// above. -buffers[0] == subbuf0 -buffers[1] == subbuf1 -buffers[2] == subbuf2 -buffers[3] == subbuf3 -buffers[4] == output_subbuf0 -buffers[5] == output_subbuf1 -``` diff --git a/third_party/xla/docs/operation_semantics.md b/third_party/xla/docs/operation_semantics.md index d2828b642ad846..5a3626b2931e8b 100644 --- a/third_party/xla/docs/operation_semantics.md +++ b/third_party/xla/docs/operation_semantics.md @@ -1416,7 +1416,6 @@ For a more intuitive description, see the "Informal Description" section below. | `collapsed_slice_dims` | `ArraySlice` | The set of dimensions in each slice that are collapsed away. These dimensions must have size 1. | | `start_index_map` | `ArraySlice` | A map that describes how to map indices in `start_indices` to legal indices into operand. | | `indices_are_sorted` | `bool` | Whether the indices are guaranteed to be sorted by the caller. | -| `unique_indices` | `bool` | Whether the indices are guaranteed to be unique by the caller. | For convenience, we label dimensions in the output array not in `offset_dims` as `batch_dims`. @@ -1486,11 +1485,6 @@ If `indices_are_sorted` is set to true then XLA can assume that `start_indices` are sorted (in ascending `start_index_map` order) by the user. If they are not then the semantics is implementation defined. -If `unique_indices` is set to true then XLA can assume that all elements -scattered to are unique. So XLA could use non-atomic operations. If -`unique_indices` is set to true and the indices being scattered to are not -unique then the semantics is implementation defined. - ### Informal Description and Examples Informally, every index `Out` in the output array corresponds to an element `E` @@ -2382,6 +2376,7 @@ Arguments | Type | Semantics `inserted_window_dims` | `ArraySlice` | The set of *window dimensions* that must be inserted into `updates` shape. `scatter_dims_to_operand_dims` | `ArraySlice` | A dimensions map from the scatter indices to the operand index space. This array is interpreted as mapping `i` to `scatter_dims_to_operand_dims[i]` . It has to be one-to-one and total. `indices_are_sorted` | `bool` | Whether the indices are guaranteed to be sorted by the caller. +`unique_indices` | `bool` | Whether the indices are guaranteed to be unique by the caller. Where: @@ -2484,6 +2479,11 @@ If `indices_are_sorted` is set to true then XLA can assume that `start_indices` are sorted (in ascending `start_index_map` order) by the user. If they are not then the semantics is implementation defined. 
+If `unique_indices` is set to true then XLA can assume that all elements +scattered to are unique. So XLA could use non-atomic operations. If +`unique_indices` is set to true and the indices being scattered to are not +unique then the semantics is implementation defined. + Informally, the scatter op can be viewed as an _inverse_ of the gather op, i.e. the scatter op updates the elements in the input that are extracted by the corresponding gather op. diff --git a/third_party/xla/opensource_only.files b/third_party/xla/opensource_only.files index 7655cabdafeb6b..666a858f3e4bd9 100644 --- a/third_party/xla/opensource_only.files +++ b/third_party/xla/opensource_only.files @@ -19,13 +19,17 @@ third_party/llvm_openmp/openmp.bzl: third_party/ortools/BUILD: third_party/ortools/glpk.BUILD: third_party/ortools/ortools.patch: -third_party/py/non_hermetic/BUILD.tpl: -third_party/py/non_hermetic/BUILD: -third_party/py/non_hermetic/README: -third_party/py/non_hermetic/ml_dtypes/BUILD: -third_party/py/non_hermetic/ml_dtypes/LICENSE: -third_party/py/non_hermetic/numpy/BUILD: -third_party/py/non_hermetic/python_configure.bzl: +third_party/py/BUILD.tpl: +third_party/py/BUILD: +third_party/py/ml_dtypes/BUILD: +third_party/py/ml_dtypes/LICENSE: +third_party/py/numpy/BUILD: +third_party/py/python_configure.bzl: +third_party/py/python_init_pip.bzl: +third_party/py/python_init_repositories.bzl: +third_party/py/python_init_rules.bzl: +third_party/py/python_init_toolchains.bzl: +third_party/py/python_repo.bzl: third_party/python_runtime/BUILD: third_party/repo.bzl: third_party/stablehlo/BUILD: diff --git a/third_party/xla/requirements_lock_3_11.txt b/third_party/xla/requirements_lock_3_11.txt new file mode 100644 index 00000000000000..5c4bb687dfecae --- /dev/null +++ b/third_party/xla/requirements_lock_3_11.txt @@ -0,0 +1,49 @@ +numpy==1.24.3 \ + --hash=sha256:0ec87a7084caa559c36e0a2309e4ecb1baa03b687201d0a847c8b0ed476a7187 \ + --hash=sha256:1a7d6acc2e7524c9955e5c903160aa4ea083736fde7e91276b0e5d98e6332812 \ + --hash=sha256:202de8f38fc4a45a3eea4b63e2f376e5f2dc64ef0fa692838e31a808520efaf7 \ + --hash=sha256:210461d87fb02a84ef243cac5e814aad2b7f4be953b32cb53327bb49fd77fbb4 \ + --hash=sha256:2d926b52ba1367f9acb76b0df6ed21f0b16a1ad87c6720a1121674e5cf63e2b6 \ + --hash=sha256:352ee00c7f8387b44d19f4cada524586f07379c0d49270f87233983bc5087ca0 \ + --hash=sha256:35400e6a8d102fd07c71ed7dcadd9eb62ee9a6e84ec159bd48c28235bbb0f8e4 \ + --hash=sha256:3c1104d3c036fb81ab923f507536daedc718d0ad5a8707c6061cdfd6d184e570 \ + --hash=sha256:4719d5aefb5189f50887773699eaf94e7d1e02bf36c1a9d353d9f46703758ca4 \ + --hash=sha256:4749e053a29364d3452c034827102ee100986903263e89884922ef01a0a6fd2f \ + --hash=sha256:5342cf6aad47943286afa6f1609cad9b4266a05e7f2ec408e2cf7aea7ff69d80 \ + --hash=sha256:56e48aec79ae238f6e4395886b5eaed058abb7231fb3361ddd7bfdf4eed54289 \ + --hash=sha256:76e3f4e85fc5d4fd311f6e9b794d0c00e7002ec122be271f2019d63376f1d385 \ + --hash=sha256:7776ea65423ca6a15255ba1872d82d207bd1e09f6d0894ee4a64678dd2204078 \ + --hash=sha256:784c6da1a07818491b0ffd63c6bbe5a33deaa0e25a20e1b3ea20cf0e43f8046c \ + --hash=sha256:8535303847b89aa6b0f00aa1dc62867b5a32923e4d1681a35b5eef2d9591a463 \ + --hash=sha256:9a7721ec204d3a237225db3e194c25268faf92e19338a35f3a224469cb6039a3 \ + --hash=sha256:a1d3c026f57ceaad42f8231305d4653d5f05dc6332a730ae5c0bea3513de0950 \ + --hash=sha256:ab344f1bf21f140adab8e47fdbc7c35a477dc01408791f8ba00d018dd0bc5155 \ + --hash=sha256:ab5f23af8c16022663a652d3b25dcdc272ac3f83c3af4c02eb8b824e6b3ab9d7 \ + 
--hash=sha256:ae8d0be48d1b6ed82588934aaaa179875e7dc4f3d84da18d7eae6eb3f06c242c \ + --hash=sha256:c91c4afd8abc3908e00a44b2672718905b8611503f7ff87390cc0ac3423fb096 \ + --hash=sha256:d5036197ecae68d7f491fcdb4df90082b0d4960ca6599ba2659957aafced7c17 \ + --hash=sha256:d6cc757de514c00b24ae8cf5c876af2a7c3df189028d68c0cb4eaa9cd5afc2bf \ + --hash=sha256:d933fabd8f6a319e8530d0de4fcc2e6a61917e0b0c271fded460032db42a0fe4 \ + --hash=sha256:ea8282b9bcfe2b5e7d491d0bf7f3e2da29700cec05b49e64d6246923329f2b02 \ + --hash=sha256:ecde0f8adef7dfdec993fd54b0f78183051b6580f606111a6d789cd14c61ea0c \ + --hash=sha256:f21c442fdd2805e91799fbe044a7b999b8571bb0ab0f7850d0cb9641a687092b +lit==17.0.6 \ + --hash=sha256:dfa9af9b55fc4509a56be7bf2346f079d7f4a242d583b9f2e0b078fd0abae31b +ml-dtypes==0.3.2 \ + --hash=sha256:2c34f2ba9660b21fe1034b608308a01be82bbef2a92fb8199f24dc6bad0d5226 \ + --hash=sha256:3a17ef2322e60858d93584e9c52a5be7dd6236b056b7fa1ec57f1bb6ba043e33 \ + --hash=sha256:533059bc5f1764fac071ef54598db358c167c51a718f68f5bb55e3dee79d2967 \ + --hash=sha256:6604877d567a29bfe7cc02969ae0f2425260e5335505cf5e7fefc3e5465f5655 \ + --hash=sha256:6b35c4e8ca957c877ac35c79ffa77724ecc3702a1e4b18b08306c03feae597bb \ + --hash=sha256:763697ab8a88d47443997a7cdf3aac7340049aed45f7521f6b0ec8a0594821fe \ + --hash=sha256:7a4c3fcbf86fa52d0204f07cfd23947ef05b4ad743a1a988e163caa34a201e5e \ + --hash=sha256:7afde548890a92b41c0fed3a6c525f1200a5727205f73dc21181a2726571bb53 \ + --hash=sha256:7ba8e1fafc7fff3e643f453bffa7d082df1678a73286ce8187d3e825e776eb94 \ + --hash=sha256:91f8783fd1f2c23fd3b9ee5ad66b785dafa58ba3cdb050c4458021fa4d1eb226 \ + --hash=sha256:93b78f53431c93953f7850bb1b925a17f0ab5d97527e38a7e865b5b4bc5cfc18 \ + --hash=sha256:961134ea44c7b8ca63eda902a44b58cd8bd670e21d62e255c81fba0a8e70d9b7 \ + --hash=sha256:b89b194e9501a92d289c1ffd411380baf5daafb9818109a4f49b0a1b6dce4462 \ + --hash=sha256:c7b3fb3d4f6b39bcd4f6c4b98f406291f0d681a895490ee29a0f95bab850d53c \ + --hash=sha256:d1a746fe5fb9cd974a91070174258f0be129c592b93f9ce7df6cc336416c3fbd \ + --hash=sha256:e8505946df1665db01332d885c2020b4cb9e84a8b1241eb4ba69d59591f65855 \ + --hash=sha256:f47619d978ab1ae7dfdc4052ea97c636c6263e1f19bd1be0e42c346b98d15ff4 \ No newline at end of file diff --git a/third_party/xla/third_party/py/BUILD b/third_party/xla/third_party/py/BUILD index e69de29bb2d1d6..84eba77ce1a7af 100644 --- a/third_party/xla/third_party/py/BUILD +++ b/third_party/xla/third_party/py/BUILD @@ -0,0 +1,40 @@ +load("@python//:defs.bzl", "compile_pip_requirements") +load("@python_version_repo//:py_version.bzl", "REQUIREMENTS") + +compile_pip_requirements( + name = "requirements", + extra_args = [ + "--allow-unsafe", + "--build-isolation", + ], + generate_hashes = True, + requirements_in = "requirements.in", + requirements_txt = REQUIREMENTS, +) + +compile_pip_requirements( + name = "requirements_nightly", + data = ["test-requirements.txt"], + extra_args = [ + "--allow-unsafe", + "--build-isolation", + "--extra-index-url=https://pypi.anaconda.org/scientific-python-nightly-wheels/simple", + "--pre", + "--upgrade", + ], + generate_hashes = False, + requirements_in = "requirements.in", + requirements_txt = REQUIREMENTS, +) + +compile_pip_requirements( + name = "requirements_dev", + extra_args = [ + "--allow-unsafe", + "--build-isolation", + "--upgrade", + ], + generate_hashes = False, + requirements_in = "requirements.in", + requirements_txt = REQUIREMENTS, +) diff --git a/third_party/xla/third_party/py/non_hermetic/BUILD b/third_party/xla/third_party/py/non_hermetic/BUILD deleted file mode 100644 
index e69de29bb2d1d6..00000000000000 diff --git a/third_party/xla/third_party/py/non_hermetic/BUILD.tpl b/third_party/xla/third_party/py/non_hermetic/BUILD.tpl deleted file mode 100644 index 45480bd4a31cf8..00000000000000 --- a/third_party/xla/third_party/py/non_hermetic/BUILD.tpl +++ /dev/null @@ -1,80 +0,0 @@ -licenses(["restricted"]) - -package(default_visibility = ["//visibility:public"]) - -# Point both runtimes to the same python binary to ensure we always -# use the python binary specified by ./configure.py script. -load("@bazel_tools//tools/python:toolchain.bzl", "py_runtime_pair") - -py_runtime( - name = "py2_runtime", - interpreter_path = "%{PYTHON_BIN_PATH}", - python_version = "PY2", -) - -py_runtime( - name = "py3_runtime", - interpreter_path = "%{PYTHON_BIN_PATH}", - python_version = "PY3", -) - -py_runtime_pair( - name = "py_runtime_pair", - py2_runtime = ":py2_runtime", - py3_runtime = ":py3_runtime", -) - -toolchain( - name = "py_toolchain", - toolchain = ":py_runtime_pair", - toolchain_type = "@bazel_tools//tools/python:toolchain_type", - target_compatible_with = [%{PLATFORM_CONSTRAINT}], - exec_compatible_with = [%{PLATFORM_CONSTRAINT}], -) - -# To build Python C/C++ extension on Windows, we need to link to python import library pythonXY.lib -# See https://docs.python.org/3/extending/windows.html -cc_import( - name = "python_lib", - interface_library = select({ - ":windows": ":python_import_lib", - # A placeholder for Unix platforms which makes --no_build happy. - "//conditions:default": "not-existing.lib", - }), - system_provided = 1, -) - -cc_library( - name = "python_headers", - hdrs = [":python_include"], - deps = select({ - ":windows": [":python_lib"], - "//conditions:default": [], - }), - includes = ["python_include"], -) - -# This alias is exists for the use of targets in the @llvm-project dependency, -# which expect a python_headers target called @python_runtime//:headers. We use -# a repo_mapping to alias python_runtime to this package, and an alias to create -# the correct target. -alias( - name = "headers", - actual = ":python_headers", -) - -cc_library( - name = "numpy_headers", - hdrs = [":numpy_include"], - includes = ["numpy_include"], -) - -config_setting( - name = "windows", - values = {"cpu": "x64_windows"}, - visibility = ["//visibility:public"], -) - -%{PYTHON_INCLUDE_GENRULE} -%{NUMPY_INCLUDE_GENRULE} -%{PYTHON_IMPORT_LIB_GENRULE} \ No newline at end of file diff --git a/third_party/xla/third_party/py/non_hermetic/README b/third_party/xla/third_party/py/non_hermetic/README deleted file mode 100644 index 62188a5817a09e..00000000000000 --- a/third_party/xla/third_party/py/non_hermetic/README +++ /dev/null @@ -1,3 +0,0 @@ -This is a temporary copy of python_configure repository rule. It allows -XLA and TSL to keep non-hermetic Python while TF uses hermetic Python. -DO NOT DEPEND ON THIS COPY as it will be deleted soon. \ No newline at end of file diff --git a/third_party/xla/third_party/py/non_hermetic/python_configure.bzl b/third_party/xla/third_party/py/non_hermetic/python_configure.bzl deleted file mode 100644 index 89732c3e33d8ee..00000000000000 --- a/third_party/xla/third_party/py/non_hermetic/python_configure.bzl +++ /dev/null @@ -1,312 +0,0 @@ -"""Repository rule for Python autoconfiguration. - -`python_configure` depends on the following environment variables: - - * `PYTHON_BIN_PATH`: location of python binary. - * `PYTHON_LIB_PATH`: Location of python libraries. 
-""" - -load( - "//third_party/remote_config:common.bzl", - "BAZEL_SH", - "PYTHON_BIN_PATH", - "PYTHON_LIB_PATH", - "TF_PYTHON_CONFIG_REPO", - "auto_config_fail", - "config_repo_label", - "execute", - "get_bash_bin", - "get_host_environ", - "get_python_bin", - "is_windows", - "raw_exec", - "read_dir", -) - -def _genrule(src_dir, genrule_name, command, outs): - """Returns a string with a genrule. - - Genrule executes the given command and produces the given outputs. - """ - return ( - "genrule(\n" + - ' name = "' + - genrule_name + '",\n' + - " outs = [\n" + - outs + - "\n ],\n" + - ' cmd = """\n' + - command + - '\n """,\n' + - ")\n" - ) - -def _norm_path(path): - """Returns a path with '/' and remove the trailing slash.""" - path = path.replace("\\", "/") - if path[-1] == "/": - path = path[:-1] - return path - -def _symlink_genrule_for_dir( - repository_ctx, - src_dir, - dest_dir, - genrule_name, - src_files = [], - dest_files = []): - """Returns a genrule to symlink(or copy if on Windows) a set of files. - - If src_dir is passed, files will be read from the given directory; otherwise - we assume files are in src_files and dest_files - """ - if src_dir != None: - src_dir = _norm_path(src_dir) - dest_dir = _norm_path(dest_dir) - files = "\n".join(read_dir(repository_ctx, src_dir)) - - # Create a list with the src_dir stripped to use for outputs. - dest_files = files.replace(src_dir, "").splitlines() - src_files = files.splitlines() - command = [] - outs = [] - for i in range(len(dest_files)): - if dest_files[i] != "": - # If we have only one file to link we do not want to use the dest_dir, as - # $(@D) will include the full path to the file. - dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i] - - # Copy the headers to create a sandboxable setup. - cmd = "cp -f" - command.append(cmd + ' "%s" "%s"' % (src_files[i], dest)) - outs.append(' "' + dest_dir + dest_files[i] + '",') - genrule = _genrule( - src_dir, - genrule_name, - " && ".join(command), - "\n".join(outs), - ) - return genrule - -def _get_python_lib(repository_ctx, python_bin): - """Gets the python lib path.""" - python_lib = get_host_environ(repository_ctx, PYTHON_LIB_PATH) - if python_lib != None: - return python_lib - - # The interesting program to execute. - print_lib = [ - "from __future__ import print_function", - "import site", - "import os", - "python_paths = []", - "if os.getenv('PYTHONPATH') is not None:", - " python_paths = os.getenv('PYTHONPATH').split(':')", - "try:", - " library_paths = site.getsitepackages()", - "except AttributeError:", - " from distutils.sysconfig import get_python_lib", - " library_paths = [get_python_lib()]", - "all_paths = set(python_paths + library_paths)", - "paths = []", - "for path in all_paths:", - " if os.path.isdir(path):", - " paths.append(path)", - "if len(paths) >=1:", - " print(paths[0])", - ] - - # The below script writes the above program to a file - # and executes it. This is to work around the limitation - # of not being able to upload files as part of execute. 
- cmd = "from os import linesep;" - cmd += "f = open('script.py', 'w');" - for line in print_lib: - cmd += "f.write(\"%s\" + linesep);" % line - cmd += "f.close();" - cmd += "from subprocess import call;" - cmd += "call([\"%s\", \"script.py\"]);" % python_bin - - result = execute(repository_ctx, [python_bin, "-c", cmd]) - return result.stdout.strip() - -def _check_python_lib(repository_ctx, python_lib): - """Checks the python lib path.""" - cmd = 'test -d "%s" -a -x "%s"' % (python_lib, python_lib) - result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd]) - if result.return_code == 1: - auto_config_fail("Invalid python library path: %s" % python_lib) - -def _check_python_bin(repository_ctx, python_bin): - """Checks the python bin path.""" - cmd = '[[ -x "%s" ]] && [[ ! -d "%s" ]]' % (python_bin, python_bin) - result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd]) - if result.return_code == 1: - auto_config_fail("--define %s='%s' is not executable. Is it the python binary?" % ( - PYTHON_BIN_PATH, - python_bin, - )) - -def _get_python_include(repository_ctx, python_bin): - """Gets the python include path.""" - result = execute( - repository_ctx, - [ - python_bin, - "-Wignore", - "-c", - "import sysconfig; " + - "print(sysconfig.get_path('include'))", - ], - error_msg = "Problem getting python include path.", - error_details = ("Is the Python binary path set up right? " + - "(See ./configure or " + PYTHON_BIN_PATH + ".) " + - "Is distutils installed?"), - ) - return result.stdout.splitlines()[0] - -def _get_python_import_lib_name(repository_ctx, python_bin): - """Get Python import library name (pythonXY.lib) on Windows.""" - result = execute( - repository_ctx, - [ - python_bin, - "-c", - "import sys;" + - 'print("python" + str(sys.version_info[0]) + ' + - ' str(sys.version_info[1]) + ".lib")', - ], - error_msg = "Problem getting python import library.", - error_details = ("Is the Python binary path set up right? " + - "(See ./configure or " + PYTHON_BIN_PATH + ".) "), - ) - return result.stdout.splitlines()[0] - -def _get_numpy_include(repository_ctx, python_bin): - """Gets the numpy include path.""" - return execute( - repository_ctx, - [ - python_bin, - "-c", - "from __future__ import print_function;" + - "import numpy;" + - " print(numpy.get_include());", - ], - error_msg = "Problem getting numpy include path.", - error_details = "Is numpy installed?", - ).stdout.splitlines()[0] - -def _create_local_python_repository(repository_ctx): - """Creates the repository containing files set up to build with Python.""" - - # Resolve all labels before doing any real work. Resolving causes the - # function to be restarted with all previous state being lost. This - # can easily lead to a O(n^2) runtime in the number of labels. 
- build_tpl = repository_ctx.path(Label("//third_party/py:BUILD.tpl")) - - python_bin = get_python_bin(repository_ctx) - _check_python_bin(repository_ctx, python_bin) - python_lib = _get_python_lib(repository_ctx, python_bin) - _check_python_lib(repository_ctx, python_lib) - python_include = _get_python_include(repository_ctx, python_bin) - numpy_include = _get_numpy_include(repository_ctx, python_bin) + "/numpy" - python_include_rule = _symlink_genrule_for_dir( - repository_ctx, - python_include, - "python_include", - "python_include", - ) - python_import_lib_genrule = "" - - # To build Python C/C++ extension on Windows, we need to link to python import library pythonXY.lib - # See https://docs.python.org/3/extending/windows.html - if is_windows(repository_ctx): - python_bin = python_bin.replace("\\", "/") - python_include = _norm_path(python_include) - python_import_lib_name = _get_python_import_lib_name(repository_ctx, python_bin) - python_import_lib_src = python_include.rsplit("/", 1)[0] + "/libs/" + python_import_lib_name - python_import_lib_genrule = _symlink_genrule_for_dir( - repository_ctx, - None, - "", - "python_import_lib", - [python_import_lib_src], - [python_import_lib_name], - ) - numpy_include_rule = _symlink_genrule_for_dir( - repository_ctx, - numpy_include, - "numpy_include/numpy", - "numpy_include", - ) - - platform_constraint = "" - if repository_ctx.attr.platform_constraint: - platform_constraint = "\"%s\"" % repository_ctx.attr.platform_constraint - repository_ctx.template("BUILD", build_tpl, { - "%{PYTHON_BIN_PATH}": python_bin, - "%{PYTHON_INCLUDE_GENRULE}": python_include_rule, - "%{PYTHON_IMPORT_LIB_GENRULE}": python_import_lib_genrule, - "%{NUMPY_INCLUDE_GENRULE}": numpy_include_rule, - "%{PLATFORM_CONSTRAINT}": platform_constraint, - }) - -def _create_remote_python_repository(repository_ctx, remote_config_repo): - """Creates pointers to a remotely configured repo set up to build with Python. - """ - repository_ctx.template("BUILD", config_repo_label(remote_config_repo, ":BUILD"), {}) - -def _python_autoconf_impl(repository_ctx): - """Implementation of the python_autoconf repository rule.""" - if get_host_environ(repository_ctx, TF_PYTHON_CONFIG_REPO) != None: - _create_remote_python_repository( - repository_ctx, - get_host_environ(repository_ctx, TF_PYTHON_CONFIG_REPO), - ) - else: - _create_local_python_repository(repository_ctx) - -_ENVIRONS = [ - BAZEL_SH, - PYTHON_BIN_PATH, - PYTHON_LIB_PATH, -] - -local_python_configure = repository_rule( - implementation = _create_local_python_repository, - environ = _ENVIRONS, - attrs = { - "environ": attr.string_dict(), - "platform_constraint": attr.string(), - }, -) - -remote_python_configure = repository_rule( - implementation = _create_local_python_repository, - environ = _ENVIRONS, - remotable = True, - attrs = { - "environ": attr.string_dict(), - "platform_constraint": attr.string(), - }, -) - -python_configure = repository_rule( - implementation = _python_autoconf_impl, - environ = _ENVIRONS + [TF_PYTHON_CONFIG_REPO], - attrs = { - "platform_constraint": attr.string(), - }, -) -"""Detects and configures the local Python. - -Add the following to your WORKSPACE FILE: - -```python -python_configure(name = "local_config_python") -``` - -Args: - name: A unique name for this workspace rule. 
-""" diff --git a/third_party/xla/third_party/py/python_init_pip.bzl b/third_party/xla/third_party/py/python_init_pip.bzl new file mode 100644 index 00000000000000..efc2bf8233cf61 --- /dev/null +++ b/third_party/xla/third_party/py/python_init_pip.bzl @@ -0,0 +1,34 @@ +"""Hermetic Python initialization. Consult the WORKSPACE on how to use it.""" + +load("@python//:defs.bzl", "interpreter") +load("@python_version_repo//:py_version.bzl", "REQUIREMENTS") +load("@rules_python//python:pip.bzl", "package_annotation", "pip_parse") + +def python_init_pip(): + numpy_annotations = { + "numpy": package_annotation( + additive_build_content = """\ +cc_library( + name = "numpy_headers_2", + hdrs = glob(["site-packages/numpy/_core/include/**/*.h"]), + strip_include_prefix="site-packages/numpy/_core/include/", +) +cc_library( + name = "numpy_headers_1", + hdrs = glob(["site-packages/numpy/core/include/**/*.h"]), + strip_include_prefix="site-packages/numpy/core/include/", +) +cc_library( + name = "numpy_headers", + deps = [":numpy_headers_2", ":numpy_headers_1"], +) +""", + ), + } + + pip_parse( + name = "pypi", + annotations = numpy_annotations, + python_interpreter_target = interpreter, + requirements_lock = REQUIREMENTS, + ) diff --git a/third_party/xla/third_party/py/python_init_repositories.bzl b/third_party/xla/third_party/py/python_init_repositories.bzl new file mode 100644 index 00000000000000..5a405f2c2aba4c --- /dev/null +++ b/third_party/xla/third_party/py/python_init_repositories.bzl @@ -0,0 +1,12 @@ +"""Hermetic Python initialization. Consult the WORKSPACE on how to use it.""" + +load("@rules_python//python:repositories.bzl", "py_repositories") +load("//third_party/py:python_repo.bzl", "python_repository") + +def python_init_repositories(requirements = {}): + python_repository( + name = "python_version_repo", + requirements_versions = requirements.keys(), + requirements_locks = requirements.values(), + ) + py_repositories() diff --git a/third_party/xla/third_party/py/python_init_rules.bzl b/third_party/xla/third_party/py/python_init_rules.bzl new file mode 100644 index 00000000000000..98a7b8bc3c315a --- /dev/null +++ b/third_party/xla/third_party/py/python_init_rules.bzl @@ -0,0 +1,11 @@ +"""Hermetic Python initialization. Consult the WORKSPACE on how to use it.""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def python_init_rules(): + http_archive( + name = "rules_python", + sha256 = "9d04041ac92a0985e344235f5d946f71ac543f1b1565f2cdbc9a2aaee8adf55b", + strip_prefix = "rules_python-0.26.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.26.0/rules_python-0.26.0.tar.gz", + ) diff --git a/third_party/xla/third_party/py/python_init_toolchains.bzl b/third_party/xla/third_party/py/python_init_toolchains.bzl new file mode 100644 index 00000000000000..c1f800db4c01e7 --- /dev/null +++ b/third_party/xla/third_party/py/python_init_toolchains.bzl @@ -0,0 +1,13 @@ +"""Hermetic Python initialization. 
Consult the WORKSPACE on how to use it.""" + +load("@python_version_repo//:py_version.bzl", "HERMETIC_PYTHON_VERSION") +load("@rules_python//python:repositories.bzl", "python_register_toolchains") +load("@rules_python//python:versions.bzl", "MINOR_MAPPING") + +def python_init_toolchains(): + if HERMETIC_PYTHON_VERSION in MINOR_MAPPING: + python_register_toolchains( + name = "python", + ignore_root_user_error = True, + python_version = HERMETIC_PYTHON_VERSION, + ) diff --git a/third_party/xla/third_party/py/python_repo.bzl b/third_party/xla/third_party/py/python_repo.bzl new file mode 100644 index 00000000000000..77a6ce9ce50b60 --- /dev/null +++ b/third_party/xla/third_party/py/python_repo.bzl @@ -0,0 +1,206 @@ +""" +Repository rule to manage hermetic Python interpreter under Bazel. + +Version can be set via build parameter "--repo_env=HERMETIC_PYTHON_VERSION=3.11" +Defaults to 3.11. + +To set wheel name, add "--repo_env=WHEEL_NAME=tensorflow_cpu" +""" + +VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] +DEFAULT_VERSION = "3.11" +WARNING = """ +HERMETIC_PYTHON_VERSION variable was not set correctly, using default version. +Python {} will be used. +To select Python version, either set HERMETIC_PYTHON_VERSION env variable in +your shell: + export HERMETIC_PYTHON_VERSION=3.12 +OR pass it as an argument to bazel command directly or inside your .bazelrc +file: + --repo_env=HERMETIC_PYTHON_VERSION=3.12 +""".format(DEFAULT_VERSION) + +content = """TF_PYTHON_VERSION = "{version}" +HERMETIC_PYTHON_VERSION = "{version}" +WHEEL_NAME = "{wheel_name}" +WHEEL_COLLAB = "{wheel_collab}" +REQUIREMENTS = "{requirements}" +""" + +def _python_repository_impl(ctx): + ctx.file("BUILD", "") + version_legacy = ctx.os.environ.get("TF_PYTHON_VERSION", "") + version = ctx.os.environ.get("HERMETIC_PYTHON_VERSION", "") + if not version: + version = version_legacy + else: + version_legacy = version + + wheel_name = ctx.os.environ.get("WHEEL_NAME", "tensorflow") + wheel_collab = ctx.os.environ.get("WHEEL_COLLAB", False) + if version not in VERSIONS: + print(WARNING) # buildifier: disable=print + version = DEFAULT_VERSION + else: + print("Using hermetic Python %s" % version) # buildifier: disable=print + + requirements = "" + for i in range(0, len(ctx.attr.requirements_locks)): + if ctx.attr.requirements_versions[i] == version: + requirements = ctx.attr.requirements_locks[i] + break + + ctx.file( + "py_version.bzl", + content.format( + version = version, + wheel_name = wheel_name, + wheel_collab = wheel_collab, + requirements = str(requirements), + ), + ) + +python_repository = repository_rule( + implementation = _python_repository_impl, + attrs = { + "requirements_versions": attr.string_list( + mandatory = False, + default = [], + ), + "requirements_locks": attr.label_list( + mandatory = False, + default = [], + ), + }, + environ = [ + "TF_PYTHON_VERSION", + "HERMETIC_PYTHON_VERSION", + "WHEEL_NAME", + "WHEEL_COLLAB", + ], +) + +def _custom_python_interpreter_impl(ctx): + version = ctx.attr.version + strip_prefix = ctx.attr.strip_prefix.format(version = version) + urls = [url.format(version = version) for url in ctx.attr.urls] + binary_name = ctx.attr.binary_name + if not binary_name: + ver_chunks = version.split(".") + binary_name = "python%s.%s" % (ver_chunks[0], ver_chunks[1]) + + install_dir = "{name}-{version}".format(name = ctx.attr.name, version = version) + _exec_and_check(ctx, ["mkdir", install_dir]) + install_path = ctx.path(install_dir) + srcs_dir = "srcs" + ctx.download_and_extract( + url = urls, + 
stripPrefix = strip_prefix, + output = srcs_dir, + ) + + configure_params = [] + if "CC" in ctx.os.environ: + configure_params.append("CC={}".format(ctx.os.environ["CC"])) + if "CXX" in ctx.os.environ: + configure_params.append("CXX={}".format(ctx.os.environ["CXX"])) + + configure_params.append("--enable-optimizations") + configure_params.append("--prefix=%s" % install_path.realpath) + _exec_and_check( + ctx, + ["./configure"] + configure_params, + working_directory = srcs_dir, + quiet = False, + ) + res = _exec_and_check(ctx, ["nproc"]) + cores = 12 if res.return_code != 0 else max(1, int(res.stdout.strip()) - 1) + _exec_and_check(ctx, ["make", "-j%s" % cores], working_directory = srcs_dir) + _exec_and_check(ctx, ["make", "altinstall"], working_directory = srcs_dir) + _exec_and_check(ctx, ["ln", "-s", binary_name, "python3"], working_directory = install_dir + "/bin") + tar = "{install_dir}.tgz".format(install_dir = install_dir) + _exec_and_check(ctx, ["tar", "czpf", tar, install_dir]) + _exec_and_check(ctx, ["rm", "-rf", srcs_dir]) + res = _exec_and_check(ctx, ["sha256sum", tar]) + + sha256 = res.stdout.split(" ")[0].strip() + tar_path = ctx.path(tar) + + example = """\n\n +To use newly built Python interpreter add the following code snippet RIGHT AFTER +python_init_toolchains() in your WORKSPACE file. The code sample should work as +is but it may need some tuning, if you have special requirements. + +``` +load("@rules_python//python:repositories.bzl", "python_register_toolchains") +python_register_toolchains( + name = "python", + # By default assume the interpreter is on the local file system, replace + # with proper URL if it is not the case. + base_url = "file://", + ignore_root_user_error = True, + python_version = "{version}", + tool_versions = {{ + "{version}": {{ + # Path to .tar.gz with Python binary. By default it points to .tgz + # file in cache where it was built originally; replace with proper + # file location, if you moved it somewhere else. + "url": "{tar_path}", + "sha256": {{ + # By default we assume Linux x86_64 architecture, eplace with + # proper architecture if you were building on a different platform. 
+ "x86_64-unknown-linux-gnu": "{sha256}", + }}, + "strip_prefix": "{install_dir}", + }}, + }}, +) +``` +\n\n""".format(version = version, tar_path = tar_path, sha256 = sha256, install_dir = install_dir) + + instructions = "INSTRUCTIONS-{version}.md".format(version = version) + ctx.file(instructions + ".tmpl", example, executable = False) + ctx.file( + "BUILD.bazel", + """ +genrule( + name = "{name}", + srcs = ["{tar}", "{instructions}.tmpl"], + outs = ["{install_dir}.tar.gz", "{instructions}"], + cmd = "cp $(location {tar}) $(location {install_dir}.tar.gz); cp $(location {instructions}.tmpl) $(location {instructions})", + visibility = ["//visibility:public"], +) + """.format( + name = ctx.attr.name, + tar = tar, + install_dir = install_dir, + instructions = instructions, + ), + executable = False, + ) + + print(example) # buildifier: disable=print + +custom_python_interpreter = repository_rule( + implementation = _custom_python_interpreter_impl, + attrs = { + "urls": attr.string_list(), + "strip_prefix": attr.string(), + "binary_name": attr.string(mandatory = False), + "version": attr.string(), + }, +) + +def _exec_and_check(ctx, command, fail_on_error = True, quiet = False, **kwargs): + res = ctx.execute(command, quiet = quiet, **kwargs) + if fail_on_error and res.return_code != 0: + fail(""" +Failed to execute command: `{command}` +Exit Code: {code} +STDERR: {stderr} + """.format( + command = command, + code = res.return_code, + stderr = res.stderr, + )) + return res diff --git a/third_party/xla/third_party/stablehlo/temporary.patch b/third_party/xla/third_party/stablehlo/temporary.patch index 2e395d99d26cf4..85a49110b59f95 100755 --- a/third_party/xla/third_party/stablehlo/temporary.patch +++ b/third_party/xla/third_party/stablehlo/temporary.patch @@ -164,81 +164,6 @@ diff --ruN a/stablehlo/CMakeLists.txt b/stablehlo/CMakeLists.txt #------------------------------------------------------------------------------- # Directory setup -diff --ruN a/stablehlo/docs/spec.md b/stablehlo/docs/spec.md ---- stablehlo/docs/spec.md -+++ stablehlo/docs/spec.md -@@ -2532,10 +2532,10 @@ - rhs_batching_dimensions, lhs_contracting_dimensions, - rhs_contracting_dimensions, precision_config), lhs, rhs, type(result))`. - --For hybrid quantized types, performs `hybrid_dequantize_then_op( -- lambda lhs, rhs: dot_general(lhs, rhs, lhs_batching_dimensions, -- rhs_batching_dimensions, lhs_contracting_dimensions, -- rhs_contracting_dimensions, precision_config), lhs, rhs)`. -+This only specifies semantics for per-tensor quantization. Per-axis quantization -+is work in progress ([#1574](https://github.com/openxla/stablehlo/issues/1574)). -+Also, in the future we may consider adding support for hybrid quantization -+ ([#1575](https://github.com/openxla/stablehlo/issues/1575)). - - `precision_config` controls the tradeoff between speed and accuracy for - computations on accelerator backends. 
This can be one of the following (at the -@@ -2552,21 +2552,21 @@ - - #### Inputs - --| Label | Name | Type | Constraints | --|-------|------------------------------|--------------------------------------------------------------|------------------------------------------------| --| (I1) | `lhs` | tensor or per-tensor quantized tensor | (C5-C6), (C9-C10), (C12-C14), (C17-C18), (C20) | --| (I2) | `rhs` | tensor or quantized tensor | (C7-C10), (C12-C20) | --| (I3) | `lhs_batching_dimensions` | 1-dimensional tensor constant of type `si64` | (C1), (C3), (C5), (C9), (C12) | --| (I4) | `rhs_batching_dimensions` | 1-dimensional tensor constant of type `si64` | (C1), (C4), (C7), (C9) | --| (I5) | `lhs_contracting_dimensions` | 1-dimensional tensor constant of type `si64` | (C2), (C3), (C6), (C10) | --| (I6) | `rhs_contracting_dimensions` | 1-dimensional tensor constant of type `si64` | (C2), (C4), (C8), (C10), (C16) | --| (I7) | `precision_config` | variadic number of enums of `DEFAULT`, `HIGH`, and `HIGHEST` | (C11) | -+| Label | Name | Type | Constraints | -+|-------|------------------------------|--------------------------------------------------------------|--------------------------------| -+| (I1) | `lhs` | tensor or per-tensor quantized tensor | (C5-C6), (C9-C10), (C12-C16) | -+| (I2) | `rhs` | tensor or quantized tensor | (C7-C10), (C12), (C18-C19) | -+| (I3) | `lhs_batching_dimensions` | 1-dimensional tensor constant of type `si64` | (C1), (C3), (C5), (C9), (C12) | -+| (I4) | `rhs_batching_dimensions` | 1-dimensional tensor constant of type `si64` | (C1), (C4), (C7), (C9) | -+| (I5) | `lhs_contracting_dimensions` | 1-dimensional tensor constant of type `si64` | (C2), (C3), (C6), (C10) | -+| (I6) | `rhs_contracting_dimensions` | 1-dimensional tensor constant of type `si64` | (C2), (C4), (C8), (C10), (C19) | -+| (I7) | `precision_config` | variadic number of enums of `DEFAULT`, `HIGH`, and `HIGHEST` | (C11) | - - #### Outputs - - | Name | Type | Constraints | - |----------|----------------------------|----------------------------| --| `result` | tensor or quantized tensor | (C12), (C14), (C18-C20) | -+| `result` | tensor or quantized tensor | (C12), (C14), (C16), (C18) | - - #### Constraints - -@@ -2589,17 +2589,14 @@ - * If the operation uses non-quantized tensors: - * (C13) `element_type(lhs) = element_type(rhs)`. - * If the operation uses quantized tensors: -- * (C14) `is_quantized(lhs) = is_quantized(result) and is_quantized(rhs)`. -- * (C15) `zero_points(rhs) = 0`. -- * (C16) If `is_per_axis_quantized(rhs)`, then -+ * (C14) `is_quantized(lhs) and is_quantized(rhs) and is_quantized(result)`. -+ * (C15) `storage_type(lhs) = storage_type(rhs)`. -+ * (C16) `expressed_type(lhs) = expressed_type(rhs) = expressed_type(result)`. -+ * (C17) `zero_points(rhs) = 0`. -+ * (C18) If `is_per_tensor_quantized(rhs)`, then -+ `is_per_tensor_quantized(result)`. -+ * (C19) If `is_per_axis_quantized(rhs)`, then - `quantization_dimension(rhs)` not in `rhs_contracting_dimensions`. -- * If `is_quantized(lhs)`: -- * (C17) `storage_type(lhs) = storage_type(rhs)`. -- * (C18) `expressed_type(lhs) = expressed_type(rhs) = expressed_type(result)`. -- * (C19) If `is_per_tensor_quantized(rhs)`, then -- `is_per_tensor_quantized(result)`. -- * If `!is_quantized(lhs)`: -- * (C20) `element_type(lhs) = expressed_type(rhs) = element_type(result)`. 
- - #### Examples - diff --ruN a/stablehlo/stablehlo/CMakeLists.txt b/stablehlo/stablehlo/CMakeLists.txt --- stablehlo/stablehlo/CMakeLists.txt +++ stablehlo/stablehlo/CMakeLists.txt @@ -250,273 +175,53 @@ diff --ruN a/stablehlo/stablehlo/CMakeLists.txt b/stablehlo/stablehlo/CMakeLists add_subdirectory(integrations) add_subdirectory(reference) add_subdirectory(tests) -diff --ruN a/stablehlo/stablehlo/conversions/linalg/tests/convolution.mlir b/stablehlo/stablehlo/conversions/linalg/tests/convolution.mlir ---- stablehlo/stablehlo/conversions/linalg/tests/convolution.mlir -+++ stablehlo/stablehlo/conversions/linalg/tests/convolution.mlir -@@ -356,7 +356,7 @@ - } - // CHECK-DAG: %[[CST:.+]] = arith.constant 0.000000e+00 : f32 - // CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[FILTER]] {{\[}}[0, 1, 2, 3]] : tensor<2x2x1x6xf32> into tensor<24xf32> --// CHECK: %[[EXPAND:.+]] = tensor.expand_shape %[[COLLAPSE]] {{\[}}[0, 1, 2, 3]] : tensor<24xf32> into tensor<2x2x2x3xf32> -+// CHECK: %[[EXPAND:.+]] = tensor.expand_shape %[[COLLAPSE]] {{\[}}[0, 1, 2, 3]] output_shape [2, 2, 2, 3] : tensor<24xf32> into tensor<2x2x2x3xf32> - // CHECK: %[[INIT:.+]] = tensor.empty() : tensor<2x3x4x2x3xf32> - // CHECK: %[[FILL:.+]] = linalg.fill ins(%[[CST]] : f32) outs(%[[INIT]] : tensor<2x3x4x2x3xf32>) -> tensor<2x3x4x2x3xf32> - // CHECK: %[[OUT:.+]] = linalg.depthwise_conv_2d_nhwc_hwcm -diff --ruN a/stablehlo/stablehlo/conversions/linalg/tests/miscellaneous.mlir b/stablehlo/stablehlo/conversions/linalg/tests/miscellaneous.mlir ---- stablehlo/stablehlo/conversions/linalg/tests/miscellaneous.mlir -+++ stablehlo/stablehlo/conversions/linalg/tests/miscellaneous.mlir -@@ -865,7 +865,7 @@ - %0 = "stablehlo.reshape"(%arg0) : (tensor) -> tensor<1xi32> - func.return %0 : tensor<1xi32> - } --// CHECK: tensor.expand_shape %{{.*}} [] : tensor into tensor<1xi32> -+// CHECK: tensor.expand_shape %{{.*}} [] output_shape [1] : tensor into tensor<1xi32> - - // ----- - -@@ -876,7 +876,7 @@ - func.return %0 : tensor<1xui32> - } - // CHECK: %[[ARG_SIGNLESS:.*]] = builtin.unrealized_conversion_cast %[[ARG_UNSIGNED]] : tensor to tensor --// CHECK: %[[RET_SIGNLESS:.*]] = tensor.expand_shape %[[ARG_SIGNLESS]] [] : tensor into tensor<1xi32> -+// CHECK: %[[RET_SIGNLESS:.*]] = tensor.expand_shape %[[ARG_SIGNLESS]] [] output_shape [1] : tensor into tensor<1xi32> - // CHECK: %[[RET_UNSIGNED:.*]] = builtin.unrealized_conversion_cast %[[RET_SIGNLESS]] : tensor<1xi32> to tensor<1xui32> - // CHECK: return %[[RET_UNSIGNED]] : tensor<1xui32> - -@@ -978,7 +978,7 @@ - } - // CHECK: %[[FLATTEN:.*]] = tensor.collapse_shape %{{.*}} {{\[}}[0, 1]] : tensor into tensor - // CHECK: %[[CAST:.*]] = tensor.cast %[[FLATTEN]] : tensor to tensor<40xf32> --// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1, 2]] : tensor<40xf32> into tensor<2x4x5xf32> -+// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1, 2]] output_shape [2, 4, 5] : tensor<40xf32> into tensor<2x4x5xf32> - - // ----- - -@@ -988,7 +988,7 @@ - func.return %0 : tensor<1x3xi32> - } - // CHECK: %[[CAST:.*]] = tensor.cast %{{.*}} : tensor to tensor<3xi32> --// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1]] : tensor<3xi32> into tensor<1x3xi32> -+// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1]] output_shape [1, 3] : tensor<3xi32> into tensor<1x3xi32> - - // ----- - -diff --ruN a/stablehlo/stablehlo/conversions/linalg/tests/random.mlir b/stablehlo/stablehlo/conversions/linalg/tests/random.mlir ---- stablehlo/stablehlo/conversions/linalg/tests/random.mlir -+++ 
stablehlo/stablehlo/conversions/linalg/tests/random.mlir -@@ -480,8 +480,8 @@ - // CHECK-DAG: %[[VAL_101:.*]] = arith.xori %[[VAL_100]], %[[VAL_87]] : i32 - - // CHECK: linalg.yield %[[YIELDED_1:.*]], %[[YIELDED_2:.*]] : i64, i64 --// CHECK-DAG: %[[VAL_206:.*]] = tensor.expand_shape %[[VAL_207:.*]]#0 {{\[\[}}0, 1]] : tensor<4xi64> into tensor<4x1xi64> --// CHECK-DAG: %[[VAL_208:.*]] = tensor.expand_shape %[[VAL_207]]#1 {{\[\[}}0, 1]] : tensor<4xi64> into tensor<4x1xi64> -+// CHECK-DAG: %[[VAL_206:.*]] = tensor.expand_shape %[[VAL_207:.*]]#0 {{\[\[}}0, 1]] -+// CHECK-DAG: %[[VAL_208:.*]] = tensor.expand_shape %[[VAL_207]]#1 {{\[\[}}0, 1]] - // CHECK-DAG: %[[VAL_209:.*]] = tensor.empty() : tensor<4x2xi64> - // CHECK-DAG: %[[VAL_213:.*]] = tensor.insert %[[VAL_30]] into %[[VAL_0]]{{\[}}%[[VAL_19]]] : tensor<2xi64> - -@@ -575,10 +575,10 @@ - // CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[CONCAT]] - - --// CHECK: %[[VAL_213:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] : tensor<80xi32> into tensor<80x1xi32> -+// CHECK: %[[VAL_213:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] - // CHECK: %[[VAL_214:.*]] = tensor.extract_slice %[[VAL_213]][0, 0] [77, 1] [1, 1] : tensor<80x1xi32> to tensor<77x1xi32> - // CHECK: %[[VAL_215:.*]] = tensor.collapse_shape %[[VAL_214]] {{\[\[}}0, 1]] : tensor<77x1xi32> into tensor<77xi32> --// CHECK: %[[VAL_216:.*]] = tensor.expand_shape %[[VAL_215]] {{\[\[}}0, 1]] : tensor<77xi32> into tensor<7x11xi32> -+// CHECK: %[[VAL_216:.*]] = tensor.expand_shape %[[VAL_215]] {{\[\[}}0, 1]] - // CHECK: %[[VAL_217:.*]] = tensor.insert %[[NEWSTATE]] into %[[ARG0]]{{\[}}%[[C1]]] : tensor<2xi64> - // CHECK: return %[[VAL_217]], %[[VAL_216]] : tensor<2xi64>, tensor<7x11xi32> - -@@ -616,10 +616,10 @@ - // CHECK-DAG: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[CONCAT]] {{\[\[}}0, 1]] : tensor<8x2xi64> into tensor<16xi64> - - --// CHECK-DAG: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] : tensor<16xi64> into tensor<16x1xi64> -+// CHECK-DAG: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] - // CHECK-DAG: %[[SLICE:.*]] = tensor.extract_slice %[[EXPANDED]][0, 0] [15, 1] [1, 1] : tensor<16x1xi64> to tensor<15x1xi64> - // CHECK-DAG: %[[EXPAND_2:.*]] = tensor.collapse_shape %[[SLICE]] {{\[\[}}0, 1]] : tensor<15x1xi64> into tensor<15xi64> --// CHECK-DAG: %[[RESHAPE:.*]] = tensor.expand_shape %[[EXPAND_2]] {{\[\[}}0, 1]] : tensor<15xi64> into tensor<3x5xi64> -+// CHECK-DAG: %[[RESHAPE:.*]] = tensor.expand_shape %[[EXPAND_2]] {{\[\[}}0, 1]] - // CHECK-DAG: %[[INSERTED:.+]] = tensor.insert %[[NEWSTATE]] into %[[ARG0]][%[[C1]]] : tensor<2xi64> - // CHECK: return %[[INSERTED]], %[[RESHAPE]] - diff --ruN a/stablehlo/stablehlo/conversions/tosa/tests/binary.mlir b/stablehlo/stablehlo/conversions/tosa/tests/binary.mlir --- stablehlo/stablehlo/conversions/tosa/tests/binary.mlir +++ stablehlo/stablehlo/conversions/tosa/tests/binary.mlir -@@ -45,14 +45,14 @@ - - // CHECK-LABEL: @divide - func.func @divide(%arg0 : tensor<10xi32>, %arg1 : tensor<10xi32>) -> tensor<10xi32> { -- // CHECK: tosa.div -+ // CHECK: tosa.int_div - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi32> - return %0 : tensor<10xi32> - } - - // CHECK-LABEL: @divide_f32 - func.func @divide_f32(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10xf32> { -- // tosa.div only supports i32, so this should not legalize. -+ // tosa.int_div only supports i32, so this should not legalize. 
- // CHECK: stablehlo.divide - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32> - return %0 : tensor<10xf32> -diff --ruN a/stablehlo/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll b/stablehlo/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll ---- stablehlo/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll -+++ stablehlo/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll -@@ -125,7 +125,7 @@ - Pattern => - replace op(input0 : Value<_: Tosa_Int32Tensor>, - input1 : Value<_: Tosa_Int32Tensor>) -- with op(input0, input1); -+ with op(input0, input1); - Pattern => - replace op(input0 : Value<_: Tosa_Tensor>, - input1 : Value<_: Tosa_Tensor>) -diff --ruN a/stablehlo/stablehlo/dialect/Base.cpp b/stablehlo/stablehlo/dialect/Base.cpp ---- stablehlo/stablehlo/dialect/Base.cpp -+++ stablehlo/stablehlo/dialect/Base.cpp -@@ -651,14 +651,14 @@ - - // quantized_type_c5 - auto maxPosFiniteNum = -- APFloat::getLargest(quantizedElementType.getExpressedType() -- .cast() -- .getFloatSemantics()) -+ APFloat::getLargest( -+ cast(quantizedElementType.getExpressedType()) -+ .getFloatSemantics()) - .convertToDouble(); - auto minPosFiniteNum = -- APFloat::getSmallest(quantizedElementType.getExpressedType() -- .cast() -- .getFloatSemantics()) -+ APFloat::getSmallest( -+ cast(quantizedElementType.getExpressedType()) -+ .getFloatSemantics()) - .convertToDouble(); - if (llvm::any_of(scales, [&](double scale) { - return scale < minPosFiniteNum || scale > maxPosFiniteNum; -diff --ruN a/stablehlo/stablehlo/dialect/TypeInference.cpp b/stablehlo/stablehlo/dialect/TypeInference.cpp ---- stablehlo/stablehlo/dialect/TypeInference.cpp -+++ stablehlo/stablehlo/dialect/TypeInference.cpp -@@ -171,7 +171,7 @@ - LogicalResult verifyConvolutionDotGeneralCommonQuantizationConstraints( - std::optional location, Type lhsElementType, Type rhsElementType, - Type resultElementType) { -- // convolution_c28 and dot_general_c14 -+ // convolution_c28 - if (!isa(rhsElementType) || - (isa(lhsElementType) != - isa(resultElementType))) { -@@ -184,19 +184,19 @@ - auto rhsQuantType = cast(rhsElementType); - if (auto lhsQuantType = dyn_cast(lhsElementType)) { - auto resultQuantType = cast(resultElementType); -- // convolution_c31 and dot_general_c17 -+ // convolution_c31 - if (lhsQuantType.getStorageType() != rhsQuantType.getStorageType()) { - return emitOptionalError( - location, "mismatched lhs and rhs quantization storage types"); - } -- // convolution_c32 and dot_general_c18 -+ // convolution_c32 - if (lhsQuantType.getExpressedType() != rhsQuantType.getExpressedType() || - lhsQuantType.getExpressedType() != resultQuantType.getExpressedType()) { - return emitOptionalError( - location, - "mismatched lhs, rhs and result quantization expressed types"); - } -- // convolution_c33 and dot_general_c19 -+ // convolution_c33 - if (isa(rhsQuantType) && - !isa(resultQuantType)) { - return emitOptionalError( -@@ -204,7 +204,7 @@ - } - } else { - Type rhsExpressedType = rhsQuantType.getExpressedType(); -- // convolution_c34 and dot_general_c20 -+ // convolution_c34 - if (lhsElementType != rhsExpressedType || - lhsElementType != resultElementType) { - return emitOptionalError(location, -@@ -3559,7 +3559,7 @@ - } - } - -- // convolution_c28, convolution_c31 - convolution_c34 -+ // convolution_c31 - convolution_c34 - return verifyConvolutionDotGeneralCommonQuantizationConstraints( - location, lhsElementType, rhsElementType, resultElementType); - } -@@ 
-3626,41 +3626,6 @@ - return success(); - } - --LogicalResult verifyDotGeneralOpQuantizationConstraints( -- std::optional location, Type lhsType, Type rhsType, -- Type resultType, ArrayRef rhsContractingDimensions) { -- Type lhsElementType = getElementTypeOrSelf(lhsType); -- Type rhsElementType = getElementTypeOrSelf(rhsType); -- Type resultElementType = getElementTypeOrSelf(resultType); -- -- // dot_general_c15 -- if (auto rhsPerTensorQuantType = -- dyn_cast(rhsElementType)) { -- if (rhsPerTensorQuantType.getZeroPoint() != 0) { -- return emitOptionalError(location, "Zero point of rhs should be 0"); -- } -- } else if (auto rhsPerAxisQuantType = -- dyn_cast(rhsElementType)) { -- if (llvm::any_of(rhsPerAxisQuantType.getZeroPoints(), -- [](int64_t zero_point) { return zero_point != 0; })) { -- return emitOptionalError(location, "Zero points of rhs should be 0"); -- } -- -- // dot_general_c16 -- if (llvm::is_contained(rhsContractingDimensions, -- rhsPerAxisQuantType.getQuantizedDimension())) { -- return emitOptionalError( -- location, -- "Quantization dimension of rhs should not be in the " -- "contracting dimension of rhs"); -- } -- } -- -- // dot_general_c14, dot_general_c17 - dot_general_c20 -- return verifyConvolutionDotGeneralCommonQuantizationConstraints( -- location, lhsElementType, rhsElementType, resultElementType); --} -- - LogicalResult verifyDotGeneralOp(std::optional location, Value lhs, - Value rhs, - ArrayRef lhsBatchingDimensions, -@@ -3683,13 +3648,6 @@ - return emitOptionalError( - location, "inferred shape '", dimSizesToString(inferredShape.getDims()), - "' ", "is incompatible with return type of operation ", resultType, ""); -- -- Type lhsType = lhs.getType(); -- Type rhsType = rhs.getType(); -- if (anyQuantized({lhsType, rhsType, resultType})) { -- return verifyDotGeneralOpQuantizationConstraints( -- location, lhsType, rhsType, resultType, rhsContractingDimensions); -- } - return success(); +@@ -155,7 +155,7 @@ + + // CHECK-LABEL: @maximum_f64 + func.func @maximum_f64(%arg0 : tensor<10xf64>, %arg1 : tensor<10xf64>) -> tensor<10xf64> { +- // CHECK: stablehlo.maximum ++ // CHECK: tosa.maximum + %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor<10xf64>, tensor<10xf64>) -> tensor<10xf64> + return %0 : tensor<10xf64> + } +diff --ruN a/stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir b/stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir +--- stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir ++++ stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir +@@ -9,8 +9,7 @@ + + // CHECK-LABEL: @constant_f64 + func.func @constant_f64() -> tensor<10xf64> { +- // TOSA does not support 64-bit types, so this should not legalize. +- // CHECK: stablehlo.constant ++ // CHECK: tosa.const + %0 = stablehlo.constant dense<0.000000e+00> : tensor<10xf64> + return %0 : tensor<10xf64> + } +diff --ruN a/stablehlo/stablehlo/dialect/AssemblyFormat.cpp b/stablehlo/stablehlo/dialect/AssemblyFormat.cpp +--- stablehlo/stablehlo/dialect/AssemblyFormat.cpp ++++ stablehlo/stablehlo/dialect/AssemblyFormat.cpp +@@ -305,8 +305,7 @@ + bool isCommutativeNoRegionMatchingDialect(OperationName innerOp, + StringRef reduceOpDialect) { + auto innerOpDialect = innerOp.getDialect(); +- return innerOpDialect && +- innerOpDialect->getNamespace().equals(reduceOpDialect) && ++ return innerOpDialect && innerOpDialect->getNamespace() == reduceOpDialect && + innerOp.hasTrait::Impl>() && + innerOp.hasTrait() && + (innerOp.hasTrait() || +@@ -359,7 +358,7 @@ + // Check E5. 
+ LLVM_DEBUG(llvm::dbgs() << "Checking ReduceOp compact print E5\n"); + auto retOp = block.getTerminator(); +- if (!retOp->getName().stripDialect().equals("return")) return false; ++ if (retOp->getName().stripDialect() != "return") return false; + + return llvm::equal(innerOp.getResults(), retOp->getOperands()); } - -@@ -3861,8 +3819,8 @@ - if (SmallVector shape; operandType.hasStaticShape() && - matchInts(outputShape, shape).succeeded()) { - int64_t operandCount = operandType.getNumElements(); -- int64_t shapeCount = std::accumulate(shape.begin(), shape.end(), 1, -- std::multiplies()); -+ int64_t shapeCount = std::accumulate( -+ shape.begin(), shape.end(), int64_t{1}, std::multiplies()); - if (operandCount != shapeCount) { - return emitOptionalError(location, - "output_shape is incompatible with input type " diff --ruN a/stablehlo/stablehlo/experimental/BUILD.bazel b/stablehlo/stablehlo/experimental/BUILD.bazel --- stablehlo/stablehlo/experimental/BUILD.bazel +++ stablehlo/stablehlo/experimental/BUILD.bazel @@ -2906,23052 +2611,37 @@ diff --ruN a/stablehlo/stablehlo/experimental/transforms/StablehloRefineShapes.c +} // namespace experimental +} // namespace stablehlo +} // namespace mlir -diff --ruN a/stablehlo/stablehlo/integrations/python/tests/stablehlo.py b/stablehlo/stablehlo/integrations/python/tests/stablehlo.py ---- stablehlo/stablehlo/integrations/python/tests/stablehlo.py -+++ stablehlo/stablehlo/integrations/python/tests/stablehlo.py -@@ -241,18 +241,18 @@ - # Formatted as (tensor_type, np_value) - # Program runs arg + arg, which is used for expected value - tests = [ -- # No numpy types for f8 - skipping fp8 tests -- ("f16", np.asarray(1, np.float16)), -- ("f32", np.asarray(2, np.float32)), -- ("f64", np.asarray(3, np.double)), -- ("1xi8", np.asarray([4], np.int8)), -- ("1xi16", np.asarray([5], np.int16)), -- ("1xi32", np.asarray([-6], np.int32)), -- # Numpy's uint treated as int by DenseElementsAttr, skipping np.uint tests -- ("2x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2,2)), -- ("2x1x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2,1,2)), -- ("?x?xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2,2)), -- ("?x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2,2)), -+ # No numpy types for f8 - skipping fp8 tests -+ ("f16", np.asarray(1, np.float16)), -+ ("f32", np.asarray(2, np.float32)), -+ ("f64", np.asarray(3, np.double)), -+ ("1xi8", np.asarray([4], np.int8)), -+ ("1xi16", np.asarray([5], np.int16)), -+ ("1xi32", np.asarray([-6], np.int32)), -+ # Numpy's uint treated as int by DenseElementsAttr, skipping np.uint tests -+ ("2x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2, 2)), -+ ("2x1x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2, 1, 2)), -+ ("?x?xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2, 2)), -+ ("?x2xf16", np.asarray([1, 2, 3, 4], np.float16).reshape(2, 2)), - ] - for test in tests: - tensor_type, arg = test -diff --ruN a/stablehlo/stablehlo/tests/ops_stablehlo_quantized.mlir b/stablehlo/stablehlo/tests/ops_stablehlo_quantized.mlir ---- stablehlo/stablehlo/tests/ops_stablehlo_quantized.mlir -+++ stablehlo/stablehlo/tests/ops_stablehlo_quantized.mlir -@@ -1066,146 +1066,6 @@ - - // ----- - --func.func @dot_general_hybrid_quantized(%arg0: tensor<2x3x4xf32>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> { -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- 
rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4xf32>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> -- func.return %0 : tensor<2x4x5xf32> --} -- --// ----- -- --func.func @dot_general_c14(%arg0: tensor<2x3x4xf32>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{rhs should be quantized for quantized operations and is_quantized(lhs)=is_quantized(result) should hold}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4xf32>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> -- func.return %0 : tensor<2x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c15_per_tensor(%arg0: tensor<2x3x4xf32>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> { -- // expected-error@+1 {{Zero point of rhs should be 0}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4xf32>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> -- func.return %0 : tensor<2x4x5xf32> --} -- --// ----- -- --func.func @dot_general_c15_per_axis( -- %arg0: tensor<2x3x4x!quant.uniform>, -- %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{Zero points of rhs should be 0}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4x!quant.uniform>, -- tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> -- func.return %0 : tensor<2x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c16( -- %arg0: tensor<2x3x4x!quant.uniform>, -- %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{Quantization dimension of rhs should not be in the contracting dimension of rhs}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [1], -- rhs_batching_dimensions = [1], -- lhs_contracting_dimensions = [0], -- rhs_contracting_dimensions = [0] -- > -- } : (tensor<2x3x4x!quant.uniform>, -- tensor<2x3x5x!quant.uniform>) -> tensor<3x4x5x!quant.uniform> -- func.return %0 : tensor<3x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c17(%arg0: tensor<2x3x4x!quant.uniform>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{mismatched lhs and rhs quantization storage types}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4x!quant.uniform>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> -- func.return %0 : tensor<2x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c18(%arg0: tensor<2x3x4x!quant.uniform>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{mismatched lhs, rhs and result quantization expressed types}} -- %0 = 
"stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4x!quant.uniform>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> -- func.return %0 : tensor<2x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c19(%arg0: tensor<2x3x4x!quant.uniform>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> { -- // expected-error@+1 {{mismatched rhs and result quantization granularity}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4x!quant.uniform>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5x!quant.uniform> -- func.return %0 : tensor<2x4x5x!quant.uniform> --} -- --// ----- -- --func.func @dot_general_c20(%arg0: tensor<2x3x4xf32>, %arg1: tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> { -- // expected-error@+1 {{mismatched rhs quantization expressed type and lhs and result element type}} -- %0 = "stablehlo.dot_general"(%arg0, %arg1) { -- dot_dimension_numbers = #stablehlo.dot< -- lhs_batching_dimensions = [0], -- rhs_batching_dimensions = [0], -- lhs_contracting_dimensions = [1], -- rhs_contracting_dimensions = [1] -- > -- } : (tensor<2x3x4xf32>, tensor<2x3x5x!quant.uniform>) -> tensor<2x4x5xf32> -- func.return %0 : tensor<2x4x5xf32> --} -- --// ----- -- - func.func @quantized_element_type_c8(%arg0: tensor<1x2x!quant.uniform:f32, 1.0:300>>) { - // expected-error-re@+1 {{operand #0 must be ranked tensor of {{.*}} 4/8/16/32-bit uniform quantized signed integer or 4/8/16/32-bit uniform quantized unsigned integer or 4/8/16/32-bit uniform quantized per axis signed integer or 4/8/16/32-bit uniform quantized per axis unsigned integer values, but got 'tensor<1x2x!quant.uniform>'}} - %0 = stablehlo.add %arg0, %arg0 : tensor<1x2x!quant.uniform:f32, 1.0:300>> -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : 
tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, 
!stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, 
%[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: 
channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // 
CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - 
func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], 
%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: 
channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: 
tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> 
tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = 
#vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } -@@ -2062,183 +2223,209 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, 
%arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = 
#vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction 
= #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : 
tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // 
CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, 
%arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : 
(!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: 
"vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ 
-1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> 
() -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: 
"vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { 
-@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2062,190 +2223,217 @@ - // ============ TYPES ============ - - // 
CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: 
tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : 
tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, 
!stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, 
%[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: 
channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // 
CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - 
func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], 
%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: 
channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: 
tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> 
tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = 
#vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } -@@ -2062,190 +2223,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: 
"type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : 
tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, 
!stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, 
%[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: 
channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // 
CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - 
func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], 
%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: 
channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: 
tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> 
tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = 
#vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } -@@ -2062,190 +2223,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: 
"type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : 
tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, 
!stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, 
%[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: 
channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // 
CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - 
func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], 
%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: 
channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: 
tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> 
tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = 
#vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } -@@ -2062,190 +2223,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: 
"type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -448,8 +484,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -466,8 +503,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -486,8 +524,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -495,8 +534,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : 
tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -508,8 +548,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -537,8 +578,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -564,15 +606,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -593,8 +636,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -603,8 +647,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -612,8 +657,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, 
!stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -625,8 +671,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -638,8 +685,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -661,8 +709,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -684,8 +733,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -713,8 +763,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -742,8 +793,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -762,29 +814,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -800,8 +856,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -823,8 +880,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -842,22 +900,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, 
%[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -869,8 +930,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -882,8 +944,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -895,15 +958,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -913,8 +978,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -924,9 +990,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -935,22 +1002,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -960,22 +1030,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: 
channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -987,8 +1060,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1000,15 +1074,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1018,6 +1094,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1029,15 +1106,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1071,8 +1150,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1085,8 +1165,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // 
CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1096,8 +1177,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1128,15 +1210,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1156,8 +1240,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1167,8 +1252,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1182,8 +1268,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1217,8 +1304,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - 
func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1238,8 +1326,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1249,22 +1338,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1274,15 +1366,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], 
%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1292,22 +1386,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1319,8 +1416,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1333,16 +1431,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1364,8 +1463,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1375,8 +1475,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1386,11 +1487,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1401,15 +1503,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1432,36 +1536,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1479,57 +1588,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1539,8 +1656,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1554,36 +1672,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: 
channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1596,8 +1719,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1612,8 +1736,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1625,8 +1750,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1650,8 +1776,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1677,8 +1804,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1698,16 +1826,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1716,8 +1846,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1727,8 +1858,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1738,8 +1870,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1749,29 +1882,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1801,8 +1938,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: 
tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1832,15 +1970,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1853,8 +1993,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1864,43 +2005,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> 
tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1914,8 +2061,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1935,29 +2083,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1969,8 +2121,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = 
#vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1980,8 +2133,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1991,8 +2145,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2008,15 +2163,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2026,22 +2183,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2059,8 +2219,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } -@@ -2068,190 +2229,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: 
"type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. 
- - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. - - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = 
"stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. - - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: 
"default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_collective_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -431,8 +467,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -478,8 +516,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -498,8 +537,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -507,8 +547,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -520,8 +561,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -549,8 +591,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -576,15 +619,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -605,8 +649,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -615,8 +660,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -624,8 +670,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -637,8 +684,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -650,8 +698,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -673,8 +722,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -696,8 +746,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -725,8 +776,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // 
CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -754,8 +806,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -774,29 +827,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -812,8 +869,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -835,8 +893,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -854,22 +913,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: 
"op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -881,8 +943,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -894,8 +957,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -907,15 +971,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -925,8 +991,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -936,9 +1003,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -947,22 +1015,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -972,22 +1043,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // 
CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -999,8 +1073,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1012,15 +1087,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1030,6 +1107,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1041,15 +1119,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1083,8 +1163,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1097,8 +1178,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1108,8 +1190,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1140,15 +1223,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1168,8 +1253,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1179,8 +1265,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1194,8 +1281,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: 
tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1229,8 +1317,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1250,8 +1339,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1261,22 +1351,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = 
"stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1286,15 +1379,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1304,22 +1399,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1331,8 +1429,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1345,16 +1444,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: 
"op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1376,8 +1476,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1387,8 +1488,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1398,11 +1500,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1413,15 +1516,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1444,36 +1549,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: 
"vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1491,57 +1601,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1551,8 +1669,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1566,36 +1685,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1608,8 +1732,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1624,8 +1749,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1637,8 +1763,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1662,8 +1789,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1689,8 +1817,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1710,16 +1839,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : 
(!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1728,8 +1859,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1739,8 +1871,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1750,8 +1883,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1761,29 +1895,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: 
tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1813,8 +1951,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1844,15 +1983,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1865,8 +2006,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1876,43 +2018,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1926,8 +2074,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1947,29 +2096,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> 
tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1981,8 +2134,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -1992,8 +2146,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -2003,8 +2158,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2020,15 +2176,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2038,22 +2196,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: 
"vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2071,8 +2232,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2080,190 +2242,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - 
func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - 
func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> 
-+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
- - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_collective_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -431,8 +467,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -478,8 +516,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -498,8 +537,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -507,8 +547,9 @@ - } - - // CHECK-LABEL: 
"default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -520,8 +561,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -549,8 +591,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -576,15 +619,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -605,8 +649,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -615,8 +660,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], 
%[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -624,8 +670,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -637,8 +684,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -650,8 +698,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -673,8 +722,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -696,8 +746,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -725,8 +776,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -754,8 +806,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -774,29 +827,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -812,8 +869,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -836,7 +894,7 @@ - - // CHECK-LABEL: "op_all_reduce_with_promotable_types" - func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -854,8 +912,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -873,22 +932,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: 
"vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -900,8 +962,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -913,8 +976,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, 
!vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -926,15 +990,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -944,8 +1010,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -955,9 +1022,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -966,22 +1034,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -991,22 +1062,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -1018,8 +1092,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1031,15 +1106,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1049,6 +1126,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1060,15 +1138,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: 
batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1102,8 +1182,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1116,8 +1197,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1127,8 +1209,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1159,15 +1242,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1187,8 +1272,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1198,8 +1284,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // 
CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1213,8 +1300,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1248,8 +1336,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1269,8 +1358,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1280,22 +1370,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // 
CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1305,15 +1398,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1323,22 +1418,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1350,8 +1448,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1364,16 +1463,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: 
!vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1395,8 +1495,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1406,8 +1507,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1417,11 +1519,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1432,15 +1535,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1463,36 +1568,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 
= "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1510,57 +1620,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ 
// CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1570,8 +1688,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1585,36 +1704,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1627,8 +1751,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1643,8 +1768,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1658,7 +1784,7 @@ - // CHECK_lABEL: "op_reduce_with_promotable_types" - func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) - -> (tensor<4xf64>) { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> -@@ -1673,8 +1799,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1699,7 +1826,7 @@ - - // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" - func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> -@@ -1716,8 +1843,9 @@ - - - // CHECK-LABEL: 
"op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1742,11 +1870,11 @@ - func.return %0 : tensor<2x9x16x7xf32> - } - --// CHECK_lABEL: "op_reduce_window_with_promotable_types" -+// CHECK-LABEL: "op_reduce_window_with_promotable_types" - func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, - %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> - (tensor<2x2xf64>, tensor<2x2xf32>) { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) -@@ -1766,8 +1894,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1787,16 +1916,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1805,8 +1936,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1816,8 +1948,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1827,8 +1960,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1838,29 +1972,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1893,7 +2031,7 @@ - func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, - %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> - tensor<200x100x300xf64> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> -@@ -1916,8 +2054,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func 
@op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1947,8 +2086,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () -@@ -1970,15 +2110,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1991,8 +2133,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -2002,43 +2145,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: 
"vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -2052,8 +2201,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -2073,29 +2223,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor 
- } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -2107,8 +2261,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -2118,8 +2273,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -2129,8 +2285,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2146,15 +2303,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2164,22 +2323,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : 
tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2197,8 +2359,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2206,190 +2369,217 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, 
%[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: 
tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
- - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_collective_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -431,8 +467,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -478,8 +516,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -498,8 +537,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -507,8 +547,9 @@ - } - - // CHECK-LABEL: 
"default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -520,8 +561,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -549,8 +591,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -576,15 +619,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -605,8 +649,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -615,8 +660,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], 
%[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -624,8 +670,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -637,8 +684,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -650,8 +698,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -673,8 +722,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -696,8 +746,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -725,8 +776,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -754,8 +806,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func 
@default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -774,29 +827,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -812,8 +869,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -836,7 +894,7 @@ - - // CHECK-LABEL: "op_all_reduce_with_promotable_types" - func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -854,8 +912,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -873,22 +932,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: 
"vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -900,8 +962,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -913,8 +976,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, 
!vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -926,15 +990,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -944,8 +1010,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -955,9 +1022,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -966,22 +1034,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -991,22 +1062,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -1018,8 +1092,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1031,15 +1106,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1049,6 +1126,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1060,15 +1138,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: 
batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1102,8 +1182,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1116,8 +1197,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1127,8 +1209,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1159,15 +1242,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1187,8 +1272,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1198,8 +1284,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // 
CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1213,8 +1300,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1248,8 +1336,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1269,8 +1358,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1280,22 +1370,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // 
CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1305,15 +1398,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1323,22 +1418,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1350,8 +1448,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1364,16 +1463,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: 
!vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1395,8 +1495,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1406,8 +1507,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1417,11 +1519,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1432,15 +1535,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1463,36 +1568,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 
= "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1510,57 +1620,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ 
// CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1570,8 +1688,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1585,36 +1704,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) 
-> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1627,8 +1751,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1643,8 +1768,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1658,7 +1784,7 @@ - // CHECK_lABEL: "op_reduce_with_promotable_types" - func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) - -> (tensor<4xf64>) { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> -@@ -1673,8 +1799,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1699,7 +1826,7 @@ - - // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" - func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> -@@ -1716,8 +1843,9 @@ - - - // CHECK-LABEL: 
"op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1746,7 +1874,7 @@ - func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, - %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> - (tensor<2x2xf64>, tensor<2x2xf32>) { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) -@@ -1766,8 +1894,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1787,16 +1916,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1805,8 +1936,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1816,8 +1948,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: 
"vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1827,8 +1960,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1838,29 +1972,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1893,7 +2031,7 @@ - func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, - %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> - tensor<200x100x300xf64> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> -@@ -1916,8 +2054,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: 
"vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1947,8 +2086,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () -@@ -1970,15 +2110,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1991,8 +2133,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -2002,43 +2145,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], 
%[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -2052,8 +2201,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -2073,29 +2223,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: 
tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -2107,8 +2261,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -2118,8 +2273,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -2129,8 +2285,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2146,15 +2303,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2164,22 +2323,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2197,8 +2359,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2206,197 +2369,225 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, 
%arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_per_tensor_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_per_tensor_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_per_axis_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_per_axis_quantization(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg0) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG0]]) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> - %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform> - func.return %0 : tensor<2x!quant.uniform> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, 
%arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
- - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_collective_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -431,8 +467,9 @@ - } - - // CHECK-LABEL: "default_composite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_composite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.composite_v1"(%arg0) <{ -+ // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ - // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{}> - // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> - // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> -@@ -446,8 +483,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -475,8 +513,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -493,8 +532,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -513,8 +553,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> 
tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -522,8 +563,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -535,8 +577,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -564,8 +607,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -591,15 +635,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -620,8 +665,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = 
#vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -630,8 +676,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -639,8 +686,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -652,8 +700,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -665,8 +714,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -688,8 +738,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -711,8 +762,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -740,8 +792,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- 
// CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -769,8 +822,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -789,29 +843,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -827,8 +885,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -851,7 +910,7 @@ - - // CHECK-LABEL: "op_all_reduce_with_promotable_types" - func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -869,8 +928,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ 
// CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -888,22 +948,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -915,8 +978,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -928,8 +992,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, 
%arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -941,15 +1006,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -959,8 +1026,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -970,9 +1038,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -981,22 +1050,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -1006,22 
+1078,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -1033,8 +1108,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1046,15 +1122,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_composite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_composite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.composite_v1"(%arg0) <{ -+ // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ - // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{#vhlo.string_v1<"my_int"> = #vhlo.integer_v1<1 : i64>, #vhlo.string_v1<"my_string"> = #vhlo.string_v1<"foo">}> - // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> - // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> -@@ -1073,8 +1151,9 @@ - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) 
{ -@@ -1084,6 +1163,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1095,15 +1175,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1137,8 +1219,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1151,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1162,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1194,15 +1279,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1222,8 +1309,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1233,8 +1321,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1248,8 +1337,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1283,8 +1373,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1304,8 +1395,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1315,22 +1407,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1340,15 +1435,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1358,22 +1455,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1385,8 +1485,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1399,16 +1500,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1430,8 +1532,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1441,8 +1544,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1452,11 +1556,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1467,15 +1572,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: 
"vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1498,36 +1605,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1545,57 +1657,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1605,8 +1725,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1620,36 +1741,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: 
"vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1662,8 +1788,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1678,8 +1805,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1690,10 +1818,10 @@ - func.return %0 : tensor - } - --// CHECK_lABEL: "op_reduce_with_promotable_types" -+// CHECK-LABEL: "op_reduce_with_promotable_types" - func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) - -> (tensor<4xf64>) { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1<4x!vhlo.f64_v1> -@@ -1708,8 +1836,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1734,7 +1863,7 @@ - - // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" - func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> -@@ -1751,8 +1880,9 @@ - - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1781,7 +1911,7 @@ - func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, - %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> - (tensor<2x2xf64>, tensor<2x2xf32>) { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) -@@ -1801,8 +1931,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1822,16 +1953,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1840,8 +1973,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1851,8 +1985,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1862,8 +1997,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1873,29 +2009,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = 
#vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1928,7 +2068,7 @@ - func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, - %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> - tensor<200x100x300xf64> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> -@@ -1951,8 +2091,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1982,8 +2123,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () -@@ -2005,15 +2147,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -2026,8 +2170,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // 
CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -2037,43 +2182,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -2087,8 +2238,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -2108,29 +2260,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> 
!vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -2142,8 +2298,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -2153,8 +2310,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -2164,8 +2322,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2181,15 +2340,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> 
!vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2199,22 +2360,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2232,8 +2396,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2241,197 +2406,225 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - 
func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_per_tensor_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_per_tensor_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_per_axis_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_per_axis_quantization(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg0) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG0]]) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> - %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform> - func.return %0 : tensor<2x!quant.uniform> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = 
#vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ 
-96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -160,6 +175,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -170,6 +186,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -180,6 +197,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -190,6 +208,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -202,6 +221,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -210,6 +230,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -219,6 +240,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -228,6 +250,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -237,6 +260,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -246,6 +270,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -255,6 +280,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -264,6 +290,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -275,6 +302,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -287,6 +315,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -299,6 +328,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -313,10 +343,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -324,8 +353,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -339,8 +369,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -362,8 +393,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -380,8 +412,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -389,8 +422,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -401,8 +435,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -413,8 +448,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -442,8 +478,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -460,8 +497,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -480,8 +518,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -489,8 +528,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : 
tensor<0xi64>> -@@ -502,8 +542,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -531,8 +572,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -558,15 +600,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -587,8 +630,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -597,8 +641,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -606,8 +651,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // 
CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -619,8 +665,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -632,8 +679,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -655,8 +703,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -678,8 +727,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -707,8 +757,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -736,8 +787,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -756,29 +808,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -794,8 +850,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -817,8 +874,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -836,22 +894,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, 
%arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -863,8 +924,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -876,8 +938,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -889,15 +952,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -907,8 +972,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -918,9 +984,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -929,22 +996,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -954,22 +1024,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : 
(!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -981,8 +1054,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -994,15 +1068,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) { -@@ -1012,6 +1088,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1023,15 +1100,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1065,8 +1144,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1079,8 +1159,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.cross-replica-sum"(%arg0) { -@@ -1090,8 +1171,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1122,15 +1204,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1150,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1161,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1176,8 +1262,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1211,8 +1298,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: 
"vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1232,8 +1320,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1243,22 +1332,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1268,15 +1360,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1286,22 +1380,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1313,8 +1410,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1327,16 +1425,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1358,8 +1457,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1369,8 +1469,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1380,11 +1481,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1395,15 +1497,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1426,36 +1530,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: 
"vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1473,57 +1582,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1533,8 +1650,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1548,36 +1666,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ 
-1590,8 +1713,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1606,8 +1730,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1619,8 +1744,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1644,8 +1770,9 @@ - } - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1671,8 +1798,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1692,16 +1820,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> 
() -@@ -1710,8 +1840,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1721,8 +1852,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1732,8 +1864,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1743,29 +1876,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1795,8 +1932,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: 
"vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1826,15 +1964,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1847,8 +1987,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -1858,43 +1999,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1908,8 +2055,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -1929,29 +2077,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -1963,8 +2115,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { 
-@@ -1974,8 +2127,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -1985,8 +2139,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2002,15 +2157,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2020,22 +2177,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2053,8 +2213,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2062,169 +2223,193 @@ - // ============ TYPES ============ - - // 
CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ 
// CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = 
"stablehlo.custom_call"(%arg0) { - call_target_name = "foo" -diff --ruN a/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir b/stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir ---- stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir -+++ stablehlo/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir -@@ -13,6 +13,7 @@ - // ============ ATTRIBUTES ============ - - // CHECK-LABEL: "attr_comparison_direction_eq" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -22,6 +23,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ne" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -31,6 +33,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_ge" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -40,6 +43,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_gt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -49,6 +53,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_le" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -58,6 +63,7 @@ - } - - // CHECK-LABEL: "attr_comparison_direction_lt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - // CHECK: comparison_direction = #vhlo -@@ -67,6 +73,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_notype" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo -@@ -76,6 +83,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_float" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -86,6 +94,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_totalorder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -96,6 +105,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_signed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -106,6 +116,7 @@ - } - - // CHECK-LABEL: "attr_comparison_type_unsigned" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func 
@attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { - %0 = "stablehlo.compare"(%arg0, %arg1) { - comparison_direction = #stablehlo, -@@ -118,6 +129,7 @@ - // ConvDimensionNumbers aka #stablehlo.conv is covered below. - - // CHECK-LABEL: "attr_custom_call_api_version_unspecified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -128,6 +140,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_original" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -138,6 +151,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -148,6 +162,7 @@ - } - - // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo", -@@ -166,6 +181,7 @@ - // DotDimensionNumbers aka #stablehlo.dot is covered below. - - // CHECK-LABEL: "attr_fft_type_fft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -176,6 +192,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_ifft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -186,6 +203,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_rfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -196,6 +214,7 @@ - } - - // CHECK-LABEL: "attr_fft_type_irfft" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { - %0 = "stablehlo.fft"(%arg0) { - // CHECK: fft_type = #vhlo -@@ -208,6 +227,7 @@ - // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
- - // CHECK-LABEL: "attr_precision_config_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -216,6 +236,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_high" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -225,6 +246,7 @@ - } - - // CHECK-LABEL: "attr_precision_config_highest" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - %0 = "stablehlo.dot"(%arg0, %arg1) { - // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> -@@ -234,6 +256,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_default" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -243,6 +266,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_three_fry" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -252,6 +276,7 @@ - } - - // CHECK-LABEL: "attr_rng_algorithm_philox" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { - // CHECK: rng_algorithm = #vhlo -@@ -261,6 +286,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_uniform" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -270,6 +296,7 @@ - } - - // CHECK-LABEL: "attr_rng_distribution_normal" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { - // CHECK: rng_distribution = #vhlo -@@ -281,6 +308,7 @@ - // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
- - // CHECK-LABEL: "attr_transpose_no_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -293,6 +321,7 @@ - } - - // CHECK-LABEL: "attr_transpose_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -305,6 +334,7 @@ - } - - // CHECK-LABEL: "attr_transpose_adjoint" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { - left_side = true, -@@ -319,10 +349,9 @@ - // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. - - // CHECK-LABEL: "attr_type_extensions_bounds" --func.func @attr_type_extensions_bounds( -- %arg0: tensor>) -- -> tensor> { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) -+func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () - func.return %arg0 : tensor> - } - -@@ -330,8 +359,9 @@ - // ============ DEFAULTS ============ - - // CHECK-LABEL: "default_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -345,8 +375,9 @@ - } - - // CHECK-LABEL: "default_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) - // CHECK-SAME: <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -368,8 +399,9 @@ - } - - // CHECK-LABEL: "default_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -386,8 +418,9 @@ - } - - // CHECK-LABEL: "default_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> -@@ -395,8 +428,9 @@ - } - - // CHECK-LABEL: "default_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: 
"vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -407,8 +441,9 @@ - } - - // CHECK-LABEL: "default_collective_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -419,8 +454,9 @@ - } - - // CHECK-LABEL: "default_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -431,8 +467,9 @@ - } - - // CHECK-LABEL: "default_composite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_composite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.composite_v1"(%arg0) <{ -+ // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ - // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{}> - // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> - // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> -@@ -446,8 +483,9 @@ - } - - // CHECK-LABEL: "default_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -475,8 +513,9 @@ - } - - // CHECK-LABEL: "default_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -493,8 +532,9 @@ - } - - // CHECK-LABEL: "default_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -513,8 +553,9 @@ - } - - // CHECK-LABEL: "default_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> 
tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> -@@ -522,8 +563,9 @@ - } - - // CHECK-LABEL: "default_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> -@@ -535,8 +577,9 @@ - } - - // CHECK-LABEL: "default_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -564,8 +607,9 @@ - } - - // CHECK-LABEL: "default_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -591,15 +635,16 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - func.return %arg0 : tensor - } - - // CHECK-LABEL: "dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -620,8 +665,9 @@ - } - - // CHECK-LABEL: "default_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = 
#vhlo.string_v1<"">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -630,8 +676,9 @@ - } - - // CHECK-LABEL: "default_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token -@@ -639,8 +686,9 @@ - } - - // CHECK-LABEL: "default_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -652,8 +700,9 @@ - } - - // CHECK-LABEL: "default_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -665,8 +714,9 @@ - } - - // CHECK-LABEL: "default_reduce_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -688,8 +738,9 @@ - } - - // CHECK-LABEL: "default_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -711,8 +762,9 @@ - } - - // CHECK-LABEL: "default_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -740,8 +792,9 @@ - } - - // CHECK-LABEL: "default_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- 
// CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -769,8 +822,9 @@ - } - - // CHECK-LABEL: "default_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -789,29 +843,33 @@ - // ============ OPS ============ - - // CHECK-LABEL: "op_abs" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_abs(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_add" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_after_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 -+ // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token - func.return %0 : !stablehlo.token - } - - // CHECK-LABEL: "op_all_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.all_gather_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ - // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, -@@ -827,8 +885,9 @@ - } - - // CHECK-LABEL: "op_all_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_reduce(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 -@@ -851,7 +910,7 @@ - - // CHECK-LABEL: "op_all_reduce_with_promotable_types" - func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { -- // CHECK: "vhlo.all_reduce_v1"(%arg0) -+ // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -869,8 +928,9 @@ - } - - // CHECK-LABEL: "op_all_to_all" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { -- // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ -+ 
// CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, -@@ -888,22 +948,25 @@ - } - - // CHECK-LABEL: "op_and" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_atan2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_batch_norm_grad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -915,8 +978,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_inference" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { -- // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ -+ // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> -@@ -928,8 +992,9 @@ - } - - // CHECK-LABEL: "op_batch_norm_training" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { -- // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, 
%arg2) <{ -+ // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, - // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -@@ -941,15 +1006,17 @@ - } - - // CHECK-LABEL: "op_bitcast_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_bitcast_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast_in_dim"(%arg0) { -@@ -959,8 +1026,9 @@ - } - - // CHECK-LABEL: "op_broadcast" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.broadcast_v1"(%arg0) <{ -+ // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ - // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> - %0 = "stablehlo.broadcast"(%arg0) { -@@ -970,9 +1038,10 @@ - } - - // CHECK-LABEL: "op_case" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -981,22 +1050,25 @@ - } - - // CHECK-LABEL: "op_cbrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cbrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_ceil" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_ceil(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_cholesky" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { -- // CHECK: "vhlo.cholesky_v1"(%arg0) <{ -+ // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ - // CHECK-SAME: lower = #vhlo.bool_v1 - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> - %0 = "stablehlo.cholesky"(%arg0) { -@@ -1006,22 
+1078,25 @@ - } - - // CHECK-LABEL: "op_clamp" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_count_leading_zeros" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_collective_permute" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { -- // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ -+ // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> -@@ -1033,8 +1108,9 @@ - } - - // CHECK-LABEL: "op_compare" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: compare_type = #vhlo, - // CHECK-SAME: comparison_direction = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1046,15 +1122,17 @@ - } - - // CHECK-LABEL: "op_complex" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { -- // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_composite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_composite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.composite_v1"(%arg0) <{ -+ // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ - // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{#vhlo.string_v1<"my_int"> = #vhlo.integer_v1<1 : i64>, #vhlo.string_v1<"my_string"> = #vhlo.string_v1<"foo">}> - // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> - // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> -@@ -1073,8 +1151,9 @@ - } - - // CHECK-LABEL: "op_concatenate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.concatenate"(%arg0, %arg1) 
{ -@@ -1084,6 +1163,7 @@ - } - - // CHECK-LABEL: "op_constant" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_constant(%arg0: tensor) -> tensor { - // CHECK: "vhlo.constant_v1"() <{ - // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> -@@ -1095,15 +1175,17 @@ - } - - // CHECK-LABEL: "op_convert" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_convert(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_convolution" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { -- // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1137,8 +1219,9 @@ - } - - // CHECK-LABEL: "op_cosine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cosine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1151,8 +1234,9 @@ - } - - // CHECK-LABEL: "op_cross_replica_sum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ -+ // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.cross-replica-sum"(%arg0) { -@@ -1162,8 +1246,9 @@ - } - - // CHECK-LABEL: "op_custom_call" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_custom_call(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.custom_call_v1"(%arg0) <{ -+ // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ - // CHECK-SAME: api_version = #vhlo, - // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, - // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, -@@ -1194,15 +1279,17 @@ - } - - // CHECK-LABEL: "op_divide" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dot_general" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { -- // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, -@@ -1222,8 +1309,9 @@ - } - - // CHECK-LABEL: "op_dot" -+// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.dot"(%arg0, %arg1) { -@@ -1233,8 +1321,9 @@ - } - - // CHECK-LABEL: "op_dynamic_broadcast_in_dim" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1248,8 +1337,9 @@ - } - - // CHECK-LABEL: "op_dynamic_conv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<4xi32>) -> tensor<1x?x?x16xf32> { -- // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, -@@ -1283,8 +1373,9 @@ - } - - // CHECK-LABEL: "op_dynamic_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { -- // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1304,8 +1395,9 @@ - } - - // CHECK-LABEL: "op_dynamic_iota" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ -+ // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ - // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_iota"(%arg0) { -@@ -1315,22 +1407,25 @@ - } - - // CHECK-LABEL: "op_dynamic_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) - func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = 
"stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { -- // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { -- // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { -@@ -1340,15 +1435,17 @@ - } - - // CHECK-LABEL: "op_dynamic_update_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> -+ // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> - func.return %0 : tensor<16xf32> - } - - // CHECK-LABEL: "op_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { -- // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> - // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> - %0 = "stablehlo.einsum"(%arg0, %arg1) { -@@ -1358,22 +1455,25 @@ - } - - // CHECK-LABEL: "op_exponential_minus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_exponential" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_exponential(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_fft" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { -- // CHECK: "vhlo.fft_v1"(%arg0) <{ -+ // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ - // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: fft_type = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> -@@ -1385,8 +1485,9 @@ - } - - // CHECK-LABEL: "op_floor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_floor(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } -@@ -1399,16 +1500,17 @@ - // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, - // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> - // CHECK-SAME: }> ({ -- // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): -- // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : () -> () - - func.return %arg0 : tensor - } - - // CHECK-LABEL: "op_gather" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { -- // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, - // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, -@@ -1430,8 +1532,9 @@ - } - - // CHECK-LABEL: "op_get_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_dimension_size(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_dimension_size"(%arg0) { -@@ -1441,8 +1544,9 @@ - } - - // CHECK-LABEL: "op_get_tuple_element" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { -- // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ -+ // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ - // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> - // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.get_tuple_element"(%arg0) { -@@ -1452,11 +1556,12 @@ - } - - // CHECK-LABEL: "op_if" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.if_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -- // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.if"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1467,15 +1572,17 @@ - } - - // CHECK-LABEL: "op_imag" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_imag(%arg0: tensor>) -> tensor { -- // CHECK: 
"vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_infeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.infeed_v1"(%arg0) <{ -+ // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ - // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, - // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> - // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) -@@ -1498,36 +1605,41 @@ - } - - // CHECK-LABEL: "op_is_finite" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_is_finite(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_log_plus_one" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_log_plus_one(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_logistic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_logistic(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_map" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.map_v1"(%arg0) <{ -+ // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): -@@ -1545,57 +1657,65 @@ - } - - // CHECK-LABEL: "op_maximum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_minimum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_multiply" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_negate" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_negate(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_not" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_not(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_optimization_barrier" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_optimization_barrier(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_or" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_outfeed" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 - %0 = "stablehlo.outfeed"(%arg0, %arg1) { -@@ -1605,8 +1725,9 @@ - } - - // CHECK-LABEL: "op_pad" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -1620,36 +1741,41 @@ - } - - // CHECK-LABEL: "op_popcnt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_popcnt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_power" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: 
"vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real_dynamic_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) - func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { -- // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_real" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_real(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_recv" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { -- // CHECK: "vhlo.recv_v1"(%arg0) <{ -+ // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -1662,8 +1788,9 @@ - } - - // CHECK-LABEL: "op_reduce" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1678,8 +1805,9 @@ - } - - // CHECK-LABEL: "op_reduce_precision" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_precision(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ - // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> - // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -@@ -1693,7 +1821,7 @@ - // CHECK_lABEL: "op_reduce_with_promotable_types" - func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) - -> (tensor<4xf64>) { -- // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) -+ // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> -@@ -1708,8 +1836,9 @@ - } - - // CHECK-LABEL: "op_reduce_scatter" -+// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, - // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, - // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> -@@ -1734,7 +1863,7 @@ - - // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" - func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { -- // CHECK: "vhlo.reduce_scatter_v1"(%arg0) -+ // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> -@@ -1751,8 +1880,9 @@ - - - // CHECK-LABEL: "op_reduce_window" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, -@@ -1777,11 +1907,11 @@ - func.return %0 : tensor<2x9x16x7xf32> - } - --// CHECK_lABEL: "op_reduce_window_with_promotable_types" -+// CHECK-LABEL: "op_reduce_window_with_promotable_types" - func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, - %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> - (tensor<2x2xf64>, tensor<2x2xf32>) { -- // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) -+ // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) -@@ -1801,8 +1931,9 @@ - } - - // CHECK-LABEL: "op_remainder" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -1822,16 +1953,18 @@ - } - - // CHECK-LABEL: "op_reshape" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { -- // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> -+ // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> - %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> - func.return %0 : tensor<4x4xf32> - } - - // CHECK-LABEL: "op_return" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.case_v1"(%arg0) ({ -- // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () -+ // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ -+ // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.case"(%arg0) ({ - "stablehlo.return"(%arg1) : (tensor) -> () -@@ -1840,8 +1973,9 @@ - } - - // CHECK-LABEL: "op_reverse" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.reverse_v1"(%arg0) <{ -+ // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.reverse"(%arg0) { -@@ -1851,8 +1985,9 @@ - } - - // CHECK-LABEL: "op_rng_bit_generator" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { -- // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ -+ // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ - // CHECK-SAME: rng_algorithm = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) - %0:2 = "stablehlo.rng_bit_generator"(%arg0) { -@@ -1862,8 +1997,9 @@ - } - - // CHECK-LABEL: "op_rng" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { -- // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: rng_distribution = #vhlo - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { -@@ -1873,29 +2009,33 @@ - } - - // CHECK-LABEL: "op_round_nearest_afz" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_round_nearest_even" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_round_nearest_even(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_rsqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_rsqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: index_vector_dim = 
#vhlo.integer_v1<1 : i64>, - // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, - // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, -@@ -1928,7 +2068,7 @@ - func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, - %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> - tensor<200x100x300xf64> { -- // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () - // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> -@@ -1951,8 +2091,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ - // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, - // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, - // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> -@@ -1982,8 +2123,9 @@ - } - - // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { -- // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) -+ // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) - // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): - // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () -@@ -2005,15 +2147,17 @@ - } - - // CHECK-LABEL: "op_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) - func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { -- // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_send" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, - // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, - // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 -@@ -2026,8 +2170,9 @@ - } - - // CHECK-LABEL: "op_set_dimension_size" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { -- // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ -+ // 
CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> - %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { -@@ -2037,43 +2182,49 @@ - } - - // CHECK-LABEL: "op_shift_left" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_arithmetic" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_shift_right_logical" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sign" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sign(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_sine" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sine(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_slice" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { -- // CHECK: "vhlo.slice_v1"(%arg0) <{ -+ // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ - // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, - // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> -@@ -2087,8 +2238,9 @@ - } - - // CHECK-LABEL: "op_sort" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { -- // CHECK: "vhlo.sort_v1"(%arg0) <{ -+ // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ - // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: is_stable = #vhlo.bool_v1 - // CHECK-SAME: }> ({ -@@ -2108,29 +2260,33 @@ - } - - // CHECK-LABEL: "op_sqrt" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_sqrt(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 
-+ // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_subtract" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_tanh" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tanh(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_torch_index_select" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { -- // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> - // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> -@@ -2142,8 +2298,9 @@ - } - - // CHECK-LABEL: "op_trace" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_trace(%arg0: tensor) { -- // CHECK: "vhlo.trace_v1"(%arg0) <{ -+ // CHECK: "vhlo.trace_v1"(%[[ARG0]]) <{ - // CHECK-SAME: tag = #vhlo.string_v1<"foo"> - // CHECK-SAME: }> : (!vhlo.tensor_v1) -> () - "stablehlo.trace"(%arg0) { -@@ -2153,8 +2310,9 @@ - } - - // CHECK-LABEL: "op_transpose" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { -- // CHECK: "vhlo.transpose_v1"(%arg0) <{ -+ // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ - // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> - // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> - %0 = "stablehlo.transpose"(%arg0) { -@@ -2164,8 +2322,9 @@ - } - - // CHECK-LABEL: "op_triangular_solve" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { -- // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ -+ // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ - // CHECK-SAME: left_side = #vhlo.bool_v1, - // CHECK-SAME: lower = #vhlo.bool_v1, - // CHECK-SAME: transpose_a = #vhlo, -@@ -2181,15 +2340,17 @@ - } - - // CHECK-LABEL: "op_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_tuple(%arg0: tensor) -> tuple> { -- // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> -+ // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> - %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> - func.return %0 : tuple> - } - - // CHECK-LABEL: "op_unary_einsum" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { -- // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ -+ // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ - // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> - // 
CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> - %0 = "stablehlo.unary_einsum"(%arg0) { -@@ -2199,22 +2360,25 @@ - } - - // CHECK-LABEL: "op_uniform_dequantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { -- // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 - %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "op_uniform_quantize" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { -- // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> - %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "op_while" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @op_while(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.while_v1"(%arg0) ({ -+ // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ - // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () - // CHECK-NEXT: }, { -@@ -2232,8 +2396,9 @@ - } - - // CHECK-LABEL: "op_xor" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } -@@ -2241,197 +2406,225 @@ - // ============ TYPES ============ - - // CHECK-LABEL: "type_i1" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], 
%[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_i64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui4" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui8" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_ui64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FN" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, 
%arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E4M3B11FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f8E5M2FNUZ" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_bf16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f16" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_f64" -+// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_complex_f32" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_complex_f64" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_dynamism_ranked" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { -- // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 -+ // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 - %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor - func.return %0 : tensor - } - - // CHECK-LABEL: "type_per_tensor_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) - func.func @type_per_tensor_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> - %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> - func.return %0 : tensor> - } - - // CHECK-LABEL: "type_per_axis_quantization" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_per_axis_quantization(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> { -- // CHECK: "vhlo.add_v1"(%arg0, %arg0) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> -+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG0]]) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> - %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform> - func.return %0 : tensor<2x!quant.uniform> - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_callee" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () -+ // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () - return %arg0 : !stablehlo.token - } - - // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> - // CHECK-LABEL: "type_token_caller" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { -- // CHECK: 
"vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} -+ // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} - // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 - %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token - return %0 : !stablehlo.token - } - - // CHECK-LABEL: "type_tuple" -+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) - func.func @type_tuple(%arg0: tuple>) -> tuple { - %0 = "stablehlo.custom_call"(%arg0) { - call_target_name = "foo" +diff --ruN a/stablehlo/stablehlo/reference/Api.cpp b/stablehlo/stablehlo/reference/Api.cpp +--- stablehlo/stablehlo/reference/Api.cpp ++++ stablehlo/stablehlo/reference/Api.cpp +@@ -51,7 +51,7 @@ + auto functions = module.getOps(); + + for (auto funcOp : functions) +- if (funcOp.getSymName().equals(mainName)) return funcOp; ++ if (funcOp.getSymName() == mainName) return funcOp; + + bool isSingleFunction = + std::distance(functions.begin(), functions.end()) == 1; +@@ -68,7 +68,7 @@ + class DefaultInterpreterFallback : public InterpreterFallback { + public: + DefaultInterpreterFallback(const InterpreterConfiguration &config) +- : config(config){}; ++ : config(config) {}; + + virtual llvm::Error operator()(Operation &op, Scope &scope, + Process *process) final { +diff --ruN a/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp b/stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp +--- stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp ++++ stablehlo/stablehlo/transforms/StablehloRefineShapes.cpp +@@ -764,7 +764,7 @@ + + // Clean up operand buffers after refinement + // Must do in this pattern to avoid needing multiple refinement iterations +- if (op.getCallTargetName().equals(kCustomCallOperandBarrierTarget)) { ++ if (op.getCallTargetName() == kCustomCallOperandBarrierTarget) { + Value operand = op.getOperand(0); + if (operand.getType() == op.getResult(0).getType()) { + op.replaceAllUsesWith(ValueRange(operand)); diff --git a/third_party/xla/third_party/stablehlo/workspace.bzl b/third_party/xla/third_party/stablehlo/workspace.bzl index 6a72c8fa16885c..aaef166d96583c 100644 --- a/third_party/xla/third_party/stablehlo/workspace.bzl +++ b/third_party/xla/third_party/stablehlo/workspace.bzl @@ -4,8 +4,8 @@ load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): # LINT.IfChange - STABLEHLO_COMMIT = "8ba7728d3fdc3ea882e893ee7e53255c95ee0e5a" - STABLEHLO_SHA256 = "1dfc7179dc9200c3ab4ea85edbac4a35393866d8cd8694fcaac00c1d27036408" + STABLEHLO_COMMIT = "797bee217e1a041e9aac22cad4db207274596d94" + STABLEHLO_SHA256 = "e5619033e131ea2eeb9eab8c8e362f3ba12e111c6b4a15dac789ca216ff22c58" # LINT.ThenChange(Google-internal path) tf_http_archive( diff --git a/third_party/xla/third_party/triton/temporary/linear_layout_compose_asan.patch b/third_party/xla/third_party/triton/temporary/linear_layout_compose_asan.patch new file mode 100644 index 00000000000000..eff83a166ac4a3 --- /dev/null +++ b/third_party/xla/third_party/triton/temporary/linear_layout_compose_asan.patch @@ -0,0 +1,18 @@ +==== triton/lib/Tools/LinearLayout.cpp#2 - /google/src/cloud/shyshkov/triton_asan/triton/lib/Tools/LinearLayout.cpp ==== +# action=edit type=text +--- triton/lib/Tools/LinearLayout.cpp 2024-05-17 09:15:25.000000000 -0700 ++++ triton/lib/Tools/LinearLayout.cpp 2024-05-21 06:27:58.000000000 -0700 +@@ -397,9 +397,11 @@ + for (auto [outDim, b] : llvm::zip(getOutDimNames(), basis)) { + bases.push_back({outDim, b}); + } +- auto newBases = llvm::make_second_range(outer.apply(bases)); ++ 
++ auto outerBases = ++ llvm::to_vector(llvm::make_second_range(outer.apply(bases))); + newInDimBases.push_back( +- std::vector(newBases.begin(), newBases.end())); ++ std::vector(outerBases.begin(), outerBases.end())); + } + } + return LinearLayout(std::move(newBases), outer.getOutDimNames()); diff --git a/third_party/xla/third_party/triton/temporary/pipelining.patch b/third_party/xla/third_party/triton/temporary/pipelining.patch deleted file mode 100644 index 9f5f36aeb5099d..00000000000000 --- a/third_party/xla/third_party/triton/temporary/pipelining.patch +++ /dev/null @@ -1,472 +0,0 @@ -This is patching changes upstream from different PRs that fix issues with -pipelining internally. Required changes are upto and including this commit -https://github.com/openai/triton/commit/70f0b7b6e333fe2155c79dfa8bec6ad388073670 -The patch can be removed with the integration that includes these changes. - -diff --git a/include/triton/Analysis/Utility.h b/include/triton/Analysis/Utility.h ---- a/include/triton/Analysis/Utility.h -+++ b/include/triton/Analysis/Utility.h -@@ -8,6 +8,18 @@ - - namespace mlir { - -+inline bool isZeroConst(Value v) { -+ auto constantOp = v.getDefiningOp(); -+ if (!constantOp) -+ return false; -+ if (auto denseAttr = dyn_cast(constantOp.getValueAttr())) -+ return denseAttr.isSplat() && denseAttr.getSplatValue().isZero(); -+ if (auto denseAttr = -+ dyn_cast(constantOp.getValueAttr())) -+ return denseAttr.isSplat() && denseAttr.getSplatValue().isZero(); -+ return false; -+} -+ - class ReduceOpHelper { - public: - explicit ReduceOpHelper(triton::ReduceOp op) -diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td b/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td ---- a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td -+++ b/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td -@@ -45,6 +45,8 @@ def TTG_AsyncWaitOp : TTG_Op<"async_wait - - let arguments = (ins Variadic:$asyncToken, I32Attr:$num); - -+ let results = (outs TTG_AsyncToken:$retToken); -+ - let assemblyFormat = "$asyncToken attr-dict"; - - let extraClassDeclaration = [{ -@@ -229,10 +231,16 @@ def TTG_LocalLoadOp : TTG_Op<"local_load - let description = [{ - Load a tensor from the local memory descriptor into a distributed tensor. - }]; -- let arguments = (ins TT_MemDescType:$src); -+ let arguments = (ins TT_MemDescType:$src, Optional :$token); -+ -+ let builders = [ -+ OpBuilder<(ins "Type":$retType, "Value":$src), -+ [{ -+ build($_builder, $_state, retType, src, /*token=*/static_cast(nullptr)); -+ }]>]; - - // Use qualified() otherwise "!tt.memdesc" is printed as "". -- let assemblyFormat = [{$src attr-dict `:` qualified(type($src)) `->` type($result)}]; -+ let assemblyFormat = [{$src (`token` $token^)? 
attr-dict `:` qualified(type($src)) `->` type($result)}]; - - let results = (outs TT_Tensor:$result); - } -diff --git a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp b/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp ---- a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp -+++ b/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp -@@ -8,6 +8,7 @@ - #include "mlir/Interfaces/SideEffectInterfaces.h" - #include "mlir/Support/LLVM.h" - #include "triton/Analysis/AxisInfo.h" -+#include "triton/Analysis/Utility.h" - #include "triton/Dialect/Triton/IR/Types.h" - #include "triton/Dialect/Triton/IR/Utility.h" - #include "triton/Dialect/TritonGPU/IR/Attributes.h" -@@ -84,12 +85,13 @@ createAsyncCopy(scf::ForOp &forOp, tt::L - Location loc = loadOp.getLoc(); - Value src = loadOp.getPtr(); - Value mask = loadOp.getMask(); -+ Value other = loadOp.getOther(); - if (!isExpensiveLoadOrStore(loadOp) && opToInfo[loadOp].blockedEncoding) { - // For inexpensive loads that do not directly feed into dot ops - // we want to use optimal layout for the data. - ttg::BlockedEncodingAttr encoding = opToInfo[loadOp].blockedEncoding; - auto convertBlockLayout = [&](Value src) { -- auto ty = src.getType().cast(); -+ auto ty = cast(src.getType()); - auto newTy = - RankedTensorType::get(ty.getShape(), ty.getElementType(), encoding); - auto cvt = -@@ -99,9 +101,11 @@ createAsyncCopy(scf::ForOp &forOp, tt::L - src = convertBlockLayout(src); - if (mask) - mask = convertBlockLayout(mask); -+ if (other) -+ other = convertBlockLayout(other); - } - -- tt::MemDescType allocTy = alloc.getType().cast(); -+ tt::MemDescType allocTy = cast(alloc.getType()); - SmallVector copyOffsets(allocTy.getRank(), zero); - copyOffsets[0] = insertIdx; - tt::MemDescType subviewTy = tt::MemDescType::get( -@@ -110,11 +114,12 @@ createAsyncCopy(scf::ForOp &forOp, tt::L - auto view = - builder.create(loc, subviewTy, alloc, copyOffsets); - Operation *copy = builder.create( -- loc, src, view, mask, loadOp.getOther(), loadOp.getCache(), -- loadOp.getEvict(), loadOp.getIsVolatile()); -+ loc, src, view, mask, other, loadOp.getCache(), loadOp.getEvict(), -+ loadOp.getIsVolatile()); - Operation *commmit = - builder.create(loc, copy->getResult(0)); -- builder.create(loc, commmit->getResult(0), 0); -+ Operation *wait = -+ builder.create(loc, commmit->getResult(0), 0); - - int stage = opToInfo[loadOp].stage; - bool isMMV3Load = opToInfo[loadOp].loadIsMMAV3; -@@ -142,9 +147,21 @@ createAsyncCopy(scf::ForOp &forOp, tt::L - for (auto alloc : allocsToErase) { - alloc.erase(); - } -- auto sharedLoad = -- builder.create(loc, loadOp.getType(), viewLoad); -- loadOp->replaceAllUsesWith(sharedLoad->getResults()); -+ -+ auto sharedLoad = builder.create( -+ loc, loadOp.getType(), viewLoad, wait->getResult(0)); -+ auto result = sharedLoad->getResults(); -+ -+ // Create a select for non-zero other values as they are not handled by -+ // AsyncCopyGlobalToLocalOp for now. 
-+ Value other = loadOp.getOther(); -+ if (other && !isZeroConst(other)) { -+ auto select = builder.create( -+ loc, loadOp.getType(), mask, sharedLoad.getResult(), other); -+ result = select->getResults(); -+ } -+ -+ loadOp->replaceAllUsesWith(result); - } - loadOp.erase(); - } -@@ -160,7 +177,7 @@ getSharedEncIfAllUsersAreDotEnc(Value va - if (user->getNumResults() != 1) - return std::nullopt; - if (auto memDesc = -- user->getResult(0).getType().dyn_cast()) { -+ dyn_cast(user->getResult(0).getType())) { - // First time we find a shared encoding in the chain, save it and try to - // use it if it is compatible with the other users. - tempAttr = memDesc.getEncoding().cast(); -@@ -203,7 +220,7 @@ getSharedEncIfAllUsersAreDotEnc(Value va - static ttg::BlockedEncodingAttr - getBlockedEncoding(tt::LoadOp loadOp, tt::ModuleAxisInfoAnalysis &axisInfo) { - Value src = loadOp.getPtr(); -- auto ty = src.getType().cast(); -+ auto ty = cast(src.getType()); - auto mod = loadOp->getParentOfType(); - int numWarps = ttg::TritonGPUDialect::getNumWarps(mod); - int threadsPerWarp = ttg::TritonGPUDialect::getThreadsPerWarp(mod); -@@ -221,7 +238,7 @@ getBlockedEncoding(tt::LoadOp loadOp, tt - - static std::optional - getSharedEncoding(tt::LoadOp loadOp, bool isMMAV3) { -- auto ty = loadOp.getType().cast(); -+ auto ty = cast(loadOp.getType()); - auto ctaLayout = ttg::getCTALayout(ty.getEncoding()); - auto blockedOrder = ttg::getOrder(ty.getEncoding()); - SmallVector order; -@@ -285,11 +302,10 @@ loadOpsToDistanceAndUse(scf::ForOp forOp - if (auto mask = loadOp.getMask()) - vec = std::min(vec, axisInfoAnalysis.getMaskAlignment(mask)); - -- auto tensorTy = ptr.getType().dyn_cast(); -+ auto tensorTy = dyn_cast(ptr.getType()); - if (!tensorTy) - return false; -- auto ty = -- tensorTy.getElementType().cast().getPointeeType(); -+ auto ty = cast(tensorTy.getElementType()).getPointeeType(); - unsigned width = vec * ty.getIntOrFloatBitWidth(); - - // We do not pipeline all loads for the following reasons: -@@ -353,7 +369,7 @@ static bool loadIsMMAv3(tt::LoadOp loadO - - // MMA V3 case. - auto newOrder = sharedEnc.getOrder(); -- auto ty = loadOp.getType().cast(); -+ auto ty = cast(loadOp.getType()); - auto oldOrder = ttg::getOrder(ty.getEncoding()); - - // The operand of MMAv3 is in SharedEncoding and its order should not -@@ -497,7 +513,7 @@ collectOpsToPipeline(scf::ForOp forOp, - static Value createAlloc(scf::ForOp &forOp, tt::LoadOp loadOp, - ttg::SharedEncodingAttr sharedEnc, unsigned distance) { - OpBuilder builder(forOp); -- auto ty = loadOp.getType().cast(); -+ auto ty = cast(loadOp.getType()); - SmallVector bufferShape(ty.getShape().begin(), ty.getShape().end()); - bufferShape.insert(bufferShape.begin(), distance); - Type memdescType = mlir::triton::MemDescType::get( -@@ -669,12 +685,23 @@ createSchedule(scf::ForOp forOp, int num - } - }); - -+ auto getNestedOperands = [](Operation *op) -> SmallVector { -+ SmallVector operands; -+ op->walk([&](Operation *nestedOp) { -+ for (Value operand : nestedOp->getOperands()) { -+ if (operand.getParentBlock()->getParentOp()->isAncestor(nestedOp)) -+ operands.push_back(operand); -+ } -+ }); -+ return operands; -+ }; -+ - // Find dependencies with distance of 1. 
- SmallVector> distanceOneUsers(numStages); - for (int stage = 0; stage < numStages - 1; stage++) { - auto &group = insertAndDeps[stage]; - for (Operation *op : group) { -- for (Value operand : op->getOperands()) { -+ for (Value operand : getNestedOperands(op)) { - if (auto arg = operand.dyn_cast()) { - if (arg.getArgNumber() > 0 && arg.getOwner() == op->getBlock()) { - auto yieldOp = op->getBlock()->getTerminator(); -@@ -905,7 +932,7 @@ static int minNumInterleavedCommitOps(Op - // Look for consecutive wait ops and combine them into a single wait op. - static void - combineRedundantWaitOps(llvm::SmallSetVector &waitOps) { -- llvm::SmallSetVector toDelete; -+ llvm::MapVector toDelete; - for (auto waitOp : waitOps) { - if (toDelete.count(waitOp)) - continue; -@@ -927,10 +954,13 @@ combineRedundantWaitOps(llvm::SmallSetVe - OpBuilder builder(waitGroup.back()); - auto newWaitOp = builder.create(waitOp.getLoc(), - depTokens, minWaitNumber); -- toDelete.insert(waitGroup.begin(), waitGroup.end()); -+ for (auto waitOp : waitGroup) { -+ toDelete[waitOp] = newWaitOp; -+ } - } - for (auto waitOp : toDelete) { -- waitOp->erase(); -+ waitOp.first->replaceAllUsesWith(waitOp.second); -+ waitOp.first->erase(); - } - } - -@@ -1010,7 +1040,7 @@ static void threadValuesThroughWait(ttng - - for (ttng::DotAsyncOp dot : asyncDots) { - for (Value operand : dot.getOperands()) { -- if (operand.getType().isa()) { -+ if (isa(operand.getType())) { - newOperands.insert(operand); - } - } -@@ -1020,15 +1050,21 @@ static void threadValuesThroughWait(ttng - // values in the operation. - auto newWait = builder.create( - wait.getLoc(), llvm::to_vector(newOperands), wait.getPendings()); -+ -+ auto dominatedByNewWait = [&](OpOperand &operand) { -+ auto opInThisBlock = -+ newWait->getBlock()->findAncestorOpInBlock(*operand.getOwner()); -+ return opInThisBlock && newWait->isBeforeInBlock(opInThisBlock); -+ }; - for (int i = 0; i < origNumOperands; i++) { - Value operand = wait.getResult(i); -- if (!operand.getType().isa()) -+ if (!isa(operand.getType())) - operand.replaceAllUsesWith(newWait.getResult(i)); - } - for (int i = origNumOperands; i < newOperands.size(); i++) { - Value operand = newWait.getOperand(i); -- if (!operand.getType().isa()) -- operand.replaceAllUsesExcept(newWait.getResult(i), newWait); -+ if (!isa(operand.getType())) -+ operand.replaceUsesWithIf(newWait.getResult(i), dominatedByNewWait); - } - wait->erase(); - } -@@ -1047,8 +1083,8 @@ static void threadValuesThroughWait(ttng - // 1. All operands that touch shared memory are multi-buffered, i.e. can't read - // an incomplete value while it's being written asynchronously by a load. - // --// 2. During iteration i, nothing other than the loop's `yield` reads the --// result of the dot. -+// 2. If the dot is used by any op in the loop, it must be used under an `if`, -+// and will be synced with a `wait 0` at the beginning of the `if` block. - // - // 3. During iteration i, between the start of the loop up until the first - // `ttng.dot_wait {pendings=0}` op, the result of the dot from iteration i-1 -@@ -1079,7 +1115,7 @@ static std::optional dotCanBeProper - // Rule 1: All shmem operands are multi-buffered. - auto checkOperand = [&](Value operand) { - if (!isa( -- operand.getType().cast().getEncoding())) { -+ cast(operand.getType()).getEncoding())) { - return true; - } - -@@ -1103,17 +1139,41 @@ static std::optional dotCanBeProper - return std::nullopt; - } - -- // Rule 2: The dot should only be used by the for loop's `yield`. 
-- if (!dotOp->hasOneUse() || -- *dotOp->getUsers().begin() != forOp.getBody()->getTerminator()) { -- LDBG("Can't make dot async because it is not used only by the loop's " -- "`yield`."); -- return std::nullopt; -+ // Rule 2: The dot cannot be unconditionally used by any op in the loop. -+ // Uses under `if` are allowed, as can be explicitly synced with a `wait 0`. -+ int iterArgIdx = -1; -+ Value iterArg = nullptr; -+ SmallVector> queue; -+ for (auto &use : dotOp->getUses()) { -+ queue.push_back({use.getOwner(), use.getOperandNumber()}); - } -- -- // The result of the dot becomes this loop carry value. -- auto iterArgIdx = dotOp->getUses().begin()->getOperandNumber(); -- auto iterArg = forOp.getRegionIterArg(iterArgIdx); -+ while (!queue.empty()) { -+ auto [user, argIdx] = queue.pop_back_val(); -+ if (user->getParentOp() == forOp) { -+ if (isa(user)) { -+ if (iterArg) { -+ // The dot is used by the loop's yield, but we can't have any other -+ // uses. -+ return std::nullopt; -+ } -+ iterArgIdx = argIdx; -+ iterArg = forOp.getRegionIterArg(argIdx); -+ continue; -+ } -+ return std::nullopt; -+ } -+ if (auto ifOp = dyn_cast(user->getParentOp())) { -+ if (isa(user)) { -+ // The result is returned by the if, follow it further. -+ auto uses = ifOp.getResult(argIdx).getUses(); -+ for (auto &use : uses) { -+ queue.push_back({use.getOwner(), use.getOperandNumber()}); -+ } -+ } -+ } else { -+ return std::nullopt; -+ } -+ } - - // Rule 3a: Are the only users of the dot's result from iteration i-1 other - // MMAv3 dots? If so, we're done, this dot can be properly async. -@@ -1181,6 +1241,32 @@ static void insertAsyncDotWaitInLoop( - return; - } - -+ // Insert waits before the users of the properly async dots other than loop -+ // yield. -+ for (auto [asyncDot, iterArgIdx] : properlyAsyncDots) { -+ SmallVector uses; -+ for (auto &use : asyncDot->getUses()) { -+ if (auto yieldOp = dyn_cast(use.getOwner())) { -+ continue; -+ } -+ uses.push_back(&use); -+ } -+ -+ DenseMap> blockToUsers; -+ for (auto use : uses) { -+ auto block = use->getOwner()->getBlock(); -+ blockToUsers[block].push_back(use->get()); -+ } -+ -+ for (auto [block, users] : blockToUsers) { -+ OpBuilder builder(block, block->begin()); -+ auto newWait = builder.create(asyncDot->getLoc(), -+ ArrayRef{}, 0); -+ -+ threadValuesThroughWait(newWait, users); -+ } -+ } -+ - // Add the wait right after the last properly-async dot. This only needs to - // wait for all properly-async dots from the i-1'th iteration to complete, IOW - // we wait until there are most `asyncDots.size()` dots in flight. 
-diff --git a/test/TritonGPU/loop-pipeline.mlir b/test/TritonGPU/loop-pipeline.mlir ---- a/test/TritonGPU/loop-pipeline.mlir -+++ b/test/TritonGPU/loop-pipeline.mlir -@@ -349,16 +349,21 @@ tt.func @indirect_bmm_scalar_dist_one(%7 - // CHECK: triton_gpu.async_copy_global_to_local - // CHECK: triton_gpu.async_copy_global_to_local - // CHECK: triton_gpu.async_commit_group -+// CHECK: triton_gpu.async_wait {{.*}} {num = 1 : i32} -+// CHECK: scf.for -+// CHECK: tt.dot - // CHECK: %[[NEXT_BUFFER_1:.*]] = tt.addptr %{{.*}}, {{.*}} - // CHECK: triton_gpu.async_copy_global_to_local %[[NEXT_BUFFER_1]] --// CHECK: %[[IND_BUFFER_0:.*]] = triton_gpu.memdesc_subview --// CHECK: %[[IND_BUFFER_1:.*]] = triton_gpu.local_load %[[IND_BUFFER_0]] -+// CHECK-DAG: %[[IND_BUFFER_WAIT_TOKEN:.*]] = triton_gpu.async_wait {{.*}} {num = 1 : i32} -+// CHECK-DAG: %[[IND_BUFFER_0:.*]] = triton_gpu.memdesc_subview -+// CHECK: %[[IND_BUFFER_1:.*]] = triton_gpu.local_load %[[IND_BUFFER_0]] token %[[IND_BUFFER_WAIT_TOKEN]] - // CHECK: %[[IND_BUFFER_2:.*]] = tt.expand_dims %[[IND_BUFFER_1]] {axis = 1 : i32} - // CHECK: %[[IND_BUFFER_3:.*]] = tt.broadcast %[[IND_BUFFER_2]] - // CHECK: %[[IND_BUFFER_4:.*]] = arith.muli {{.*}}, %[[IND_BUFFER_3]] - // CHECK: %[[NEXT_BUFFER_0:.*]] = tt.addptr {{.*}}, %[[IND_BUFFER_4]] - // CHECK: triton_gpu.async_copy_global_to_local %[[NEXT_BUFFER_0]] - // CHECK: triton_gpu.async_wait {{.*}} {num = 1 : i32} -+// CHECK: scf.yield - tt.func @indirect_bmm_vector(%77: tensor<16x16xi64, #BL> {tt.divisibility=16: i32, tt.constancy=16: i32}, - %76: index, - %49: tensor<16x16x!tt.ptr, #AL> {tt.divisibility=16: i32, tt.contiguity=2 : i32}, -diff --git a/test/TritonGPU/reorder-instructions.mlir b/test/TritonGPU/reorder-instructions.mlir ---- a/test/TritonGPU/reorder-instructions.mlir -+++ b/test/TritonGPU/reorder-instructions.mlir -@@ -28,7 +28,7 @@ module attributes {"triton_gpu.num-warps - // CHECK: triton_gpu.async_wait {num = 0 : i32} - // CHECK: triton_gpu.local_dealloc %0 : !tt.memdesc<4x128x64xf16, #shared> - // CHECK: triton_gpu.local_dealloc %1 : !tt.memdesc<4x128x64xf16, #shared> --// CHECK: %2 = triton_gpu.convert_layout %arg0 : tensor<32x32xf32, #blocked> -> tensor<32x32xf32, #blocked1> -+// CHECK: %3 = triton_gpu.convert_layout %arg0 : tensor<32x32xf32, #blocked> -> tensor<32x32xf32, #blocked1> - #blocked = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [1, 4], order = [0, 1]}> - #blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [1, 4], order = [1, 0]}> - #shared = #triton_gpu.shared<{vec = 8, perPhase = 1, maxPhase = 4, order = [0, 1]}> -diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp ---- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp -+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp -@@ -333,17 +333,6 @@ static Value faddAccumulate(ConversionPa - return newStruct; - } - --static bool isZero(Value v) { -- auto constantOp = v.getDefiningOp(); -- if (!constantOp) -- return false; -- if (auto denseAttr = dyn_cast(constantOp.getValueAttr())) -- return denseAttr.isSplat() && denseAttr.getSplatValue().isZero(); -- if (auto denseAttr = -- dyn_cast(constantOp.getValueAttr())) -- return denseAttr.isSplat() && denseAttr.getSplatValue().isZero(); -- return false; --} - - static SmallVector emitWait(ConversionPatternRewriter &rewriter, - Location loc, SmallVector acc, -@@ 
-402,7 +391,7 @@ LogicalResult convertDot(const LLVMTypeC - int M = 4 * instrShape[0]; - int N = instrShape[1]; - int K = instrShape[2]; -- bool zeroAcc = isZero(c); -+ bool zeroAcc = isZeroConst(c); - auto shapePerCTATile = getShapePerCTATile(mmaEncoding); - int numRepM = ceil(dShapePerCTA[0], shapePerCTATile[0]); - int numRepN = ceil(dShapePerCTA[1], shapePerCTATile[1]); -diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp ---- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp -+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp -@@ -924,8 +924,11 @@ struct AsyncWaitOpConversion - auto voidTy = void_ty(ctx); - ptxBuilder.launch(rewriter, loc, voidTy); - -- // Safe to remove the op since it doesn't have any return value. -- rewriter.eraseOp(op); -+ // Drop the result token. -+ Value zero = rewriter.create( -+ op.getLoc(), IntegerType::get(op.getContext(), 32), -+ rewriter.getI32IntegerAttr(0)); -+ rewriter.replaceOp(op, zero); - return success(); - } - }; diff --git a/third_party/xla/third_party/triton/temporary/series.bzl b/third_party/xla/third_party/triton/temporary/series.bzl index b6002f83e1d7bb..b3d935c048fadd 100644 --- a/third_party/xla/third_party/triton/temporary/series.bzl +++ b/third_party/xla/third_party/triton/temporary/series.bzl @@ -6,6 +6,5 @@ internal patch during the next triton integration process. """ temporary_patch_list = [ - "//third_party/triton/temporary:pipelining.patch", - "//third_party/triton/temporary:support_ceil_op.patch", + "//third_party/triton/temporary:linear_layout_compose_asan.patch", ] diff --git a/third_party/xla/third_party/triton/temporary/support_ceil_op.patch b/third_party/xla/third_party/triton/temporary/support_ceil_op.patch deleted file mode 100644 index 71b323d9fccdca..00000000000000 --- a/third_party/xla/third_party/triton/temporary/support_ceil_op.patch +++ /dev/null @@ -1,138 +0,0 @@ -Cherry-picking https://github.com/openai/triton/commit/62706e8c518c8c56e56460a43732d8e375217860 -until the next integration lands it. Can be removed as it is already merged. 
- -diff --git a/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp b/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp ---- a/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp -+++ b/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp -@@ -805,6 +805,7 @@ void mlir::triton::populateElementwiseOp - POPULATE_UNARY_OP(arith::FPToUIOp, LLVM::FPToUIOp) - POPULATE_UNARY_OP(arith::UIToFPOp, LLVM::UIToFPOp) - POPULATE_UNARY_OP(math::FloorOp, math::FloorOp) -+ POPULATE_UNARY_OP(math::CeilOp, math::CeilOp) - POPULATE_UNARY_OP(math::LogOp, math::LogOp) - POPULATE_UNARY_OP(math::Log2Op, math::Log2Op) - POPULATE_UNARY_OP(math::CosOp, math::CosOp) -diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp ---- a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp -+++ b/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp -@@ -125,12 +125,13 @@ void populateMathPatternsAndLegality(Tri - MLIRContext *context = patterns.getContext(); - // Rewrite rule - patterns.add, GenericOpPattern, -- GenericOpPattern, GenericOpPattern, -- GenericOpPattern, GenericOpPattern, -- GenericOpPattern, GenericOpPattern, -- GenericOpPattern, GenericOpPattern, -- GenericOpPattern, GenericOpPattern, -- GenericOpPattern>(typeConverter, context); -+ GenericOpPattern, GenericOpPattern, -+ GenericOpPattern, GenericOpPattern, -+ GenericOpPattern, GenericOpPattern, -+ GenericOpPattern, GenericOpPattern, -+ GenericOpPattern, GenericOpPattern, -+ GenericOpPattern, GenericOpPattern>( -+ typeConverter, context); - } - - // -diff --git a/lib/Dialect/TritonNvidiaGPU/Transforms/PlanCTA.cpp b/lib/Dialect/TritonNvidiaGPU/Transforms/PlanCTA.cpp ---- a/lib/Dialect/TritonNvidiaGPU/Transforms/PlanCTA.cpp -+++ b/lib/Dialect/TritonNvidiaGPU/Transforms/PlanCTA.cpp -@@ -651,10 +651,9 @@ bool CTAPlanner::isElementwiseOp(Operati - math::CeilOp, math::CopySignOp, math::CosOp, math::SinOp, - math::CountLeadingZerosOp, math::CountTrailingZerosOp, - math::CtPopOp, math::ErfOp, math::ExpOp, math::Exp2Op, -- math::FloorOp, math::ExpM1Op, math::FloorOp, math::FmaOp, -- math::LogOp, math::Log10Op, math::Log1pOp, math::Log2Op, -- math::PowFOp, math::RsqrtOp, math::SqrtOp, math::RsqrtOp, -- math::TanhOp>(op)) -+ math::FloorOp, math::ExpM1Op, math::FmaOp, math::LogOp, -+ math::Log10Op, math::Log1pOp, math::Log2Op, math::PowFOp, -+ math::RsqrtOp, math::SqrtOp, math::RsqrtOp, math::TanhOp>(op)) - return true; - if (llvm::isa Value { - return self.create(val); - }) -+ .def("create_ceil", -+ [](TritonOpBuilder &self, Value &val) -> Value { -+ return self.create(val); -+ }) - .def("create_exp", - [](TritonOpBuilder &self, Value &val) -> Value { - return self.create(val); -diff --git a/python/test/unit/language/test_core.py b/python/test/unit/language/test_core.py ---- a/python/test/unit/language/test_core.py -+++ b/python/test/unit/language/test_core.py -@@ -915,10 +915,11 @@ def test_unary_op(dtype_x, expr, num_cta - - - @pytest.mark.interpreter --@pytest.mark.parametrize("dtype_x, expr, x", [(dtype_x, expr, x) -- for dtype_x in ["float32", "float64"] -- for expr in ['exp', 'log', 'cos', 'sin', 'exp2', 'log2', 'sqrt', 'floor'] -- for x in ['x', '3.0']]) -+@pytest.mark.parametrize("dtype_x, expr, x", -+ [(dtype_x, expr, x) -+ for dtype_x in ["float32", "float64"] -+ for expr in ['exp', 'log', 'cos', 'sin', 'exp2', 'log2', 'sqrt', 'floor', 'ceil'] -+ for x in ['x', '3.0']]) - def test_math_op(dtype_x, expr, x, device): - _test_unary(dtype_x, f'tl.{expr}({x})', 
f'np.{expr}({x}) ', device=device) - -diff --git a/python/triton/language/__init__.py b/python/triton/language/__init__.py ---- a/python/triton/language/__init__.py -+++ b/python/triton/language/__init__.py -@@ -102,7 +102,8 @@ from .core import ( - void, - where, - ) --from .math import (umulhi, exp, exp2, fma, log, log2, cos, rsqrt, sin, sqrt, sqrt_rn, abs, fdiv, div_rn, erf, floor) -+from .math import (umulhi, exp, exp2, fma, log, log2, cos, rsqrt, sin, sqrt, sqrt_rn, abs, fdiv, div_rn, erf, floor, -+ ceil) - from .random import ( - pair_uniform_to_normal, - philox, -@@ -142,6 +143,7 @@ from .random import ( - "builtin", - "cat", - "cdiv", -+ "ceil", - "clamp", - "const", - "const_pointer_type", -diff --git a/python/triton/language/math.py b/python/triton/language/math.py ---- a/python/triton/language/math.py -+++ b/python/triton/language/math.py -@@ -230,6 +230,15 @@ def floor(x, _builder=None): - - - @core.builtin -+@_check_dtype(dtypes=["fp32", "fp64"]) -+@_add_math_1arg_docstr("ceil") -+@core._tensor_member_fn -+def ceil(x, _builder=None): -+ x = core._to_tensor(x, _builder) -+ return core.tensor(_builder.create_ceil(x.handle), x.type) -+ -+ -+@core.builtin - @_add_math_3arg_docstr("fused multiply-add") - def fma(x, y, z, _builder=None): - x = core._to_tensor(x, _builder) -diff --git a/python/triton/runtime/interpreter.py b/python/triton/runtime/interpreter.py ---- a/python/triton/runtime/interpreter.py -+++ b/python/triton/runtime/interpreter.py -@@ -391,6 +391,7 @@ class InterpreterBuilder: - create_fabs = lambda self, arg: self.unary_op(arg, np.abs) - create_iabs = lambda self, arg: self.unary_op(arg, np.abs) - create_floor = lambda self, arg: self.unary_op(arg, np.floor) -+ create_ceil = lambda self, arg: self.unary_op(arg, np.ceil) - create_log = lambda self, arg: self.unary_op(arg, np.log) - create_log2 = lambda self, arg: self.unary_op(arg, np.log2) - create_precise_sqrt = lambda self, arg: self.unary_op(arg, np.sqrt) diff --git a/third_party/xla/third_party/triton/workspace.bzl b/third_party/xla/third_party/triton/workspace.bzl index 45daf7974a022e..a257f1f3e44645 100644 --- a/third_party/xla/third_party/triton/workspace.bzl +++ b/third_party/xla/third_party/triton/workspace.bzl @@ -8,8 +8,8 @@ load("//third_party/triton/xla_extensions:series.bzl", "extensions_files_patch_l def repo(): """Imports Triton.""" - TRITON_COMMIT = "cl623533461" - TRITON_SHA256 = "7aa74e82e4417a91fc7a7a84b4f6ad2b7e4e58512758d6c78ca3cd1c8771326b" + TRITON_COMMIT = "cl634675237" + TRITON_SHA256 = "7151d057ee8443c2f45cbe18a7435a42f37e18f562e5d238b844b6e09fc560e6" tf_http_archive( name = "triton", sha256 = TRITON_SHA256, diff --git a/third_party/xla/third_party/triton/xla_extensions/env_vars.patch b/third_party/xla/third_party/triton/xla_extensions/env_vars.patch deleted file mode 100644 index 955eb6db8da68e..00000000000000 --- a/third_party/xla/third_party/triton/xla_extensions/env_vars.patch +++ /dev/null @@ -1,14 +0,0 @@ -Long standing patch due to licensing issues. 
-diff --git a/include/triton/Tools/Sys/GetEnv.hpp b/include/triton/Tools/Sys/GetEnv.hpp -index 31bc03fe1..a19a432df 100644 ---- a/include/triton/Tools/Sys/GetEnv.hpp -+++ b/include/triton/Tools/Sys/GetEnv.hpp -@@ -34,7 +34,7 @@ inline const std::set ENV_VARS = { - "AMDGCN_ENABLE_DUMP", - "DISABLE_FAST_REDUCTION", - "DISABLE_LLVM_OPT", -- "DISABLE_MMA_V3", -+ "ENABLE_MMA_V3", - "DISABLE_PTXAS_OPT", - "LLVM_IR_ENABLE_DUMP", - "MLIR_ENABLE_DUMP", diff --git a/third_party/xla/third_party/triton/xla_extensions/series.bzl b/third_party/xla/third_party/triton/xla_extensions/series.bzl index b858da203fb094..af524fb253cbef 100644 --- a/third_party/xla/third_party/triton/xla_extensions/series.bzl +++ b/third_party/xla/third_party/triton/xla_extensions/series.bzl @@ -4,7 +4,6 @@ applied in the previous copybara workflow. """ extensions_files_patch_list = [ - "//third_party/triton/xla_extensions:env_vars.patch", # File not exported to google "//third_party/triton/xla_extensions:sparse_dot_nvgpu.patch", # Sparsity internal patch "//third_party/triton/xla_extensions:sparse_dot_base.patch", # Sparsity internal patch "//third_party/triton/xla_extensions:sparse_dot_passes.patch", # Sparsity internal patch diff --git a/third_party/xla/third_party/triton/xla_extensions/sparse_dot_base.patch b/third_party/xla/third_party/triton/xla_extensions/sparse_dot_base.patch index dcacd99740b18f..08b7dd6f7ada87 100644 --- a/third_party/xla/third_party/triton/xla_extensions/sparse_dot_base.patch +++ b/third_party/xla/third_party/triton/xla_extensions/sparse_dot_base.patch @@ -1,8 +1,9 @@ diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td b/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td +index 56f0b6b49..aa91ea9b8 100644 --- a/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td +++ b/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td -@@ -1158,4 +1158,12 @@ section 9.7.13.4.1 for more details. - let extraClassDeclaration = extraDistributedDeclaration; +@@ -1262,4 +1262,16 @@ section 9.7.13.4.1 for more details. 
+ }]; } +def SparseDotMetaEncodingAttr : DistributedEncoding<"SparseDotMetaEncoding", "sparse_dot_meta_encoding"> { @@ -10,14 +11,19 @@ diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td b/include/ + + let parameters = (ins "Attribute":$parent); + let assemblyFormat = "`<``{` struct(params) `}``>`"; -+ let extraClassDeclaration = extraDistributedDeclaration; ++ let extraClassDeclaration = extraDistributedDeclaration # [{ ++ SmallVector getContigPerThread() { ++ return getSizePerThread(); ++ }; ++ }]; +} + #endif diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td b/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td +index 4966a5f73..d2bb33cfa 100644 --- a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td +++ b/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td -@@ -7,6 +7,7 @@ include "triton/Dialect/TritonGPU/IR/Tri +@@ -7,6 +7,7 @@ include "triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td" include "mlir/Dialect/Arith/IR/ArithBase.td" include "triton/Dialect/Triton/IR/TritonTypes.td" include "triton/Dialect/Triton/IR/TritonAttrDefs.td" @@ -25,8 +31,8 @@ diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td b/include/trito include "mlir/IR/OpBase.td" include "mlir/Interfaces/SideEffectInterfaces.td" // Pure include "mlir/Interfaces/InferTypeOpInterface.td" // SameOperandsAndResultType -@@ -214,4 +215,19 @@ def TTG_LocalLoadOp : TTG_Op<"local_load - let results = (outs TT_Tensor:$result); +@@ -232,4 +233,19 @@ def TTG_LocalStoreOp : TTG_Op<"local_store", [MemoryEffects<[MemWrite shape, return encoding; } @@ -70,7 +77,7 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia + +LogicalResult SparseDotOp::verify() { + // Verify operand A. -+ auto aTensorTy = getOperand(0).getType().cast(); ++ auto aTensorTy = cast(getOperand(0).getType()); + auto aElemTy = aTensorTy.getElementType(); + if (!aElemTy.isF16() && !aElemTy.isBF16()) + return emitError("element type of operand A is not supported"); @@ -78,7 +85,7 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia + if (aShape.size() != 2) return emitError("shape of operand A is incorrect"); + + // Verify operand B. -+ auto bTensorTy = getOperand(1).getType().cast(); ++ auto bTensorTy = cast(getOperand(1).getType()); + auto bElemTy = bTensorTy.getElementType(); + if (!bElemTy.isF16() && !bElemTy.isBF16()) + return emitError("element type of operand B is not supported"); @@ -86,7 +93,7 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia + if (bShape.size() != 2) return emitError("shape of operand B is incorrect"); + + // Verify operand C. -+ auto cTensorTy = getOperand(2).getType().cast(); ++ auto cTensorTy = cast(getOperand(2).getType()); + auto cElemTy = cTensorTy.getElementType(); + if (!cElemTy.isF32()) + return emitError("element type of operand C is not supported"); @@ -101,7 +108,7 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia + return emitError("operand element types do not match"); + + // Verify sparse metadata. 
-+ auto metaTy = getOperand(3).getType().cast(); ++ auto metaTy = cast(getOperand(3).getType()); + auto metaShape = metaTy.getShape(); + if (!metaTy.getElementType().isInteger(16) || metaShape.size() != 2) + return emitError("sparse metadata tensor is invalid"); @@ -125,7 +132,7 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia +//--- SparseDotMetaEncodingAttr --- +unsigned SparseDotMetaEncodingAttr::getTotalElemsPerThread( + ArrayRef shape, Type eltTy) const { -+ auto mmaLayout = getParent().cast(); ++ auto mmaLayout = mlir::cast(getParent()); + return product(shape) / + (mmaLayout.getWarpsPerCTA()[0] * kMetadataElementsPerWarp); +} @@ -169,9 +176,10 @@ diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dia } // namespace triton } // namespace mlir diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM.cpp +index f8ece0f1c..435610817 100644 --- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM.cpp +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM.cpp -@@ -38,6 +38,14 @@ Value convertLayout(int opIdx, Conversio +@@ -43,6 +43,14 @@ Value convertLayout(int opIdx, ConversionPatternRewriter &rewriter, const LLVMTypeConverter *typeConverter, Value thread); } @@ -185,19 +193,19 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM. + namespace { - struct LocalLoadOpConversion -@@ -59,6 +67,10 @@ public: - .isa()) { + using namespace mlir; +@@ -67,6 +75,10 @@ public: + cast(dstLayout).getParent())) { return lowerSharedToDotOperand(op, adaptor, getTypeConverter(), rewriter); } -+ if (srcLayout.isa() && -+ dstLayout.isa()) { ++ if (isa(srcLayout) && ++ isa(dstLayout)) { + return lowerSharedToSparseMeta(op, adaptor, getTypeConverter(), rewriter); + } return failure(); } -@@ -130,6 +142,29 @@ private: +@@ -138,6 +150,26 @@ private: rewriter.replaceOp(op, res); return success(); } @@ -208,13 +216,10 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM. + const LLVMTypeConverter *typeConverter, + ConversionPatternRewriter &rewriter) const { + auto loc = op.getLoc(); -+ auto sparseEncoding = op.getResult() -+ .getType() -+ .cast() -+ .getEncoding() -+ .cast(); ++ auto sparseEncoding = cast( ++ cast(op.getResult().getType()).getEncoding()); + auto llvmElemTy = typeConverter->convertType( -+ op.getSrc().getType().cast().getElementType()); ++ cast(op.getSrc().getType()).getElementType()); + auto smemObj = getSharedMemoryObjectFromStruct(loc, adaptor.getSrc(), + llvmElemTy, rewriter); + Value res = SharedToSparseDotOperand::convertLayout( @@ -229,6 +234,7 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM. struct ConvertLayoutOpOptimizedConversion diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp new file mode 100644 +index 000000000..3011cf73d --- /dev/null +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp @@ -0,0 +1,69 @@ @@ -255,7 +261,7 @@ new file mode 100644 + Value thread) { + // Calculate tile size as number of mask elements (4xi4). 
+ NvidiaMmaEncodingAttr mmaLayout = -+ sparseEncoding.getParent().cast(); ++ cast(sparseEncoding.getParent()); + SmallVector shapePerCTATile = { + kTileSize * mmaLayout.getWarpsPerCTA()[0], + kTileSize / kMetadataElementsPerPackedValue}; @@ -272,7 +278,7 @@ new file mode 100644 + Value rowId = add(mul(warpGroupId, i32_val(kTileSize)), laneGroupId); + + // Calculate number of tile repetitions. -+ auto shape = tensor.getType().cast().getShape(); ++ auto shape = cast(tensor.getType()).getShape(); + int repM = shape[0] / shapePerCTATile[0]; + int repK = shape[1] / shapePerCTATile[1]; + assert(repM > 0 && repK > 0); @@ -302,9 +308,10 @@ new file mode 100644 +} +} // namespace SharedToSparseDotOperand diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp +index 374b9ec9e..1601806b4 100644 --- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp -@@ -32,6 +32,12 @@ LogicalResult convertAsyncWGMMA(triton:: +@@ -32,6 +32,12 @@ LogicalResult convertAsyncWGMMA(triton::nvidia_gpu::DotAsyncOp op, const LLVMTypeConverter *typeConverter, ConversionPatternRewriter &rewriter, Value thread); @@ -317,7 +324,7 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp b/thir namespace { struct DotOpConversion : public ConvertOpToLLVMPattern { using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; -@@ -180,6 +186,18 @@ struct DotWaitOpConversion +@@ -174,6 +180,18 @@ struct DotWaitOpConversion return success(); } }; @@ -336,7 +343,7 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp b/thir } // namespace void mlir::triton::NVIDIA::populateDotOpToLLVMPatterns( -@@ -188,4 +206,5 @@ void mlir::triton::NVIDIA::populateDotOp +@@ -182,4 +200,5 @@ void mlir::triton::NVIDIA::populateDotOpToLLVMPatterns( patterns.add(typeConverter, benefit); patterns.add(typeConverter, benefit); patterns.add(typeConverter, benefit); @@ -344,6 +351,7 @@ diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp b/thir } diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/Sparse.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/Sparse.cpp new file mode 100644 +index 000000000..34d9212d2 --- /dev/null +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/Sparse.cpp @@ -0,0 +1,339 @@ @@ -397,15 +405,15 @@ new file mode 100644 + const LLVMTypeConverter *typeConverter, + ConversionPatternRewriter &rewriter) { + // Get number of repetitions across the dimensions. -+ auto aTensorTy = op.getA().getType().cast(); -+ auto bTensorTy = op.getB().getType().cast(); ++ auto aTensorTy = cast(op.getA().getType()); ++ auto bTensorTy = cast(op.getB().getType()); + -+ auto layoutA = aTensorTy.getEncoding().dyn_cast(); -+ auto layoutB = bTensorTy.getEncoding().dyn_cast(); ++ auto layoutA = dyn_cast(aTensorTy.getEncoding()); ++ auto layoutB = dyn_cast(bTensorTy.getEncoding()); + assert(layoutA != nullptr && layoutB != nullptr); + + int bitwidth = aTensorTy.getElementType().getIntOrFloatBitWidth(); -+ auto mmaEnc = layoutA.getParent().cast(); ++ auto mmaEnc = cast(layoutA.getParent()); + auto repA = mmaEnc.getMMAv2Rep(triton::gpu::getShapePerCTA(aTensorTy), + bitwidth, layoutA.getOpIdx()); + auto repB = mmaEnc.getMMAv2Rep(triton::gpu::getShapePerCTA(bTensorTy), @@ -435,7 +443,7 @@ new file mode 100644 + } + + // Flatten accumulator values. 
-+ auto dTensorTy = op.getD().getType().cast(); ++ auto dTensorTy = cast(op.getD().getType()); + auto fc = unpackLLElements(loc, adaptor.getC(), rewriter); + + // Create `mma.sp` instruction for 4/8 core matrices. @@ -512,7 +520,7 @@ new file mode 100644 + Location loc, std::vector instrShape, + bool trans, int dimWpt, Value warpId, MemDescType tensorTy, + Value baseDesc, int minor) { -+ auto sharedLayout = tensorTy.getEncoding().cast(); ++ auto sharedLayout = cast(tensorTy.getEncoding()); + int elemBytes = tensorTy.getElementTypeBitWidth() / 8; + int elemsPerSwizzlingRow = + kMmaLineSize / sharedLayout.getPerPhase() / elemBytes; @@ -541,10 +549,10 @@ new file mode 100644 + ConversionPatternRewriter &rewriter, + Value thread) { + // Get number of repetitions across the dimensions. -+ auto aTensorTy = op.getA().getType().cast(); -+ auto bTensorTy = op.getB().getType().cast(); -+ auto dTensorTy = op.getD().getType().cast(); -+ auto mmaEnc = dTensorTy.getEncoding().cast(); ++ auto aTensorTy = cast(op.getA().getType()); ++ auto bTensorTy = cast(op.getB().getType()); ++ auto dTensorTy = cast(op.getD().getType()); ++ auto mmaEnc = cast(dTensorTy.getEncoding()); + + auto shapePerCTA = getShapePerCTA(dTensorTy); + auto shapePerCTATile = getShapePerCTATile(mmaEnc); @@ -573,7 +581,7 @@ new file mode 100644 + auto sharedObj = getSharedMemoryObjectFromStruct( + loc, arg, typeConverter->convertType(tensorTy.getElementType()), + rewriter); -+ auto sharedLayout = tensorTy.getEncoding().cast(); ++ auto sharedLayout = cast(tensorTy.getEncoding()); + auto shape = getShapePerCTA(tensorTy); + auto ord = sharedLayout.getOrder(); + int byteSize = aTensorTy.getElementTypeBitWidth() / 8; @@ -671,9 +679,9 @@ new file mode 100644 + SparseDotOp::Adaptor adaptor, + const LLVMTypeConverter *typeConverter, + ConversionPatternRewriter &rewriter) { -+ auto resultTy = op.getResult().getType().cast(); ++ auto resultTy = cast(op.getResult().getType()); + NvidiaMmaEncodingAttr mmaLayout = -+ resultTy.getEncoding().cast(); ++ cast(resultTy.getEncoding()); + + if (mmaLayout.isAmpere()) { + return convertSparseMMA(op, adaptor, typeConverter, rewriter); @@ -687,9 +695,10 @@ new file mode 100644 + "Unsupported SparseDotOp found when converting TritonGPU to LLVM."); +} diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp +index 738f0fe04..867939f65 100644 --- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp -@@ -87,8 +87,8 @@ int64_t getSwizzlingFromLayout(const Sha +@@ -88,8 +88,8 @@ int64_t getSwizzlingFromLayout(const SharedEncodingAttr &layout, return swizzlingByteWidth; } diff --git a/third_party/xla/third_party/triton/xla_extensions/sparse_dot_fixes_y24w17.patch b/third_party/xla/third_party/triton/xla_extensions/sparse_dot_fixes_y24w17.patch index 9d1ae2e91cae3f..ce009aa688e9bf 100644 --- a/third_party/xla/third_party/triton/xla_extensions/sparse_dot_fixes_y24w17.patch +++ b/third_party/xla/third_party/triton/xla_extensions/sparse_dot_fixes_y24w17.patch @@ -1,30 +1,8 @@ -diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp ---- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp -+++ 
b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp -@@ -22,16 +22,16 @@ Value convertLayout( - // Calculate tile size as number of mask elements (4xi4). - NvidiaMmaEncodingAttr mmaLayout = - sparseEncoding.getParent().cast(); -+ SmallVector warpsPerCTA = mmaLayout.getWarpsPerCTA(); - SmallVector shapePerCTATile = { -- kTileSize * mmaLayout.getWarpsPerCTA()[0], -- kTileSize / kMetadataElementsPerPackedValue}; -+ kTileSize * warpsPerCTA[0], kTileSize / kMetadataElementsPerPackedValue}; - Value strideM = smemObj.strides[0]; - Value strideK = smemObj.strides[1]; - - // Calculate offset in the tile for the current thread. - Value threadsPerWarp = i32_val(kThreadsPerWarp); - Value warpId = udiv(thread, threadsPerWarp); -- Value warpGroupId = urem(warpId, i32_val(shapePerCTATile[0] / kTileSize)); -+ Value warpGroupId = udiv(warpId, i32_val(warpsPerCTA[1])); - Value laneId = urem(thread, threadsPerWarp); - Value laneGroupId = udiv(laneId, i32_val(kThreadsInGroup)); - Value columnId = urem(laneId, i32_val(shapePerCTATile[1])); diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp +index 0516fc56f..1f27f8a43 100644 --- a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp +++ b/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp -@@ -139,6 +139,7 @@ class BlockedToMMA : public mlir::Rewrit +@@ -142,6 +142,7 @@ class BlockedToMMA : public mlir::RewritePattern { mlir::TypeID::get()); } @@ -32,7 +10,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect // Finds the first different bitwidth in the chain of shape-preserving // unary ops that x depends on. // There are two primary scenarios: -@@ -172,7 +173,6 @@ class BlockedToMMA : public mlir::Rewrit +@@ -175,7 +176,6 @@ class BlockedToMMA : public mlir::RewritePattern { return origBitWidth; } @@ -40,7 +18,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect BlockedToMMA(mlir::MLIRContext *context, int computeCapability) : mlir::RewritePattern(tt::DotOp::getOperationName(), 2, context), computeCapability(computeCapability) {} -@@ -388,18 +388,22 @@ class SparseBlockedToMMA : public mlir:: +@@ -389,18 +389,22 @@ class SparseBlockedToMMA : public mlir::RewritePattern { newRetType, oldAcc); if (versionMajor == 2) { @@ -49,7 +27,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + int kWidth = 32 / minBitwidth; + // convert A operand - auto oldAType = a.getType().cast(); + auto oldAType = cast(a.getType()); - auto newAEncoding = ttg::DotOperandEncodingAttr::get( - ctx, 0, mmaEnc, oldAType.getElementType()); + auto newAEncoding = @@ -59,7 +37,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect a = rewriter.create(a.getLoc(), newAType, a); // convert B operand - auto oldBType = b.getType().cast(); + auto oldBType = cast(b.getType()); - auto newBEncoding = ttg::DotOperandEncodingAttr::get( - ctx, 1, mmaEnc, oldBType.getElementType()); + auto newBEncoding = @@ -67,3 +45,27 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect auto newBType = RankedTensorType::get( oldBType.getShape(), oldBType.getElementType(), newBEncoding); b = rewriter.create(b.getLoc(), newBType, b); +diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp +index 
3011cf73d..ea587dced 100644 +--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp ++++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp +@@ -22,16 +22,16 @@ Value convertLayout( + // Calculate tile size as number of mask elements (4xi4). + NvidiaMmaEncodingAttr mmaLayout = + cast(sparseEncoding.getParent()); ++ SmallVector warpsPerCTA = mmaLayout.getWarpsPerCTA(); + SmallVector shapePerCTATile = { +- kTileSize * mmaLayout.getWarpsPerCTA()[0], +- kTileSize / kMetadataElementsPerPackedValue}; ++ kTileSize * warpsPerCTA[0], kTileSize / kMetadataElementsPerPackedValue}; + Value strideM = smemObj.strides[0]; + Value strideK = smemObj.strides[1]; + + // Calculate offset in the tile for the current thread. + Value threadsPerWarp = i32_val(kThreadsPerWarp); + Value warpId = udiv(thread, threadsPerWarp); +- Value warpGroupId = urem(warpId, i32_val(shapePerCTATile[0] / kTileSize)); ++ Value warpGroupId = udiv(warpId, i32_val(warpsPerCTA[1])); + Value laneId = urem(thread, threadsPerWarp); + Value laneGroupId = udiv(laneId, i32_val(kThreadsInGroup)); + Value columnId = urem(laneId, i32_val(shapePerCTATile[1])); diff --git a/third_party/xla/third_party/triton/xla_extensions/sparse_dot_fixes_y24w19.patch b/third_party/xla/third_party/triton/xla_extensions/sparse_dot_fixes_y24w19.patch index 8ac91d153690fd..775ed317d1f9b9 100644 --- a/third_party/xla/third_party/triton/xla_extensions/sparse_dot_fixes_y24w19.patch +++ b/third_party/xla/third_party/triton/xla_extensions/sparse_dot_fixes_y24w19.patch @@ -11,3 +11,21 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect // get MMA encoding for the given number of warps auto retShapePerCTA = ttg::getShapePerCTA(oldRetType); +diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp +--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp ++++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ConvertLayoutOpToLLVM/SharedToSparseDotOperand.cpp +@@ -31,7 +31,13 @@ Value convertLayout( + // Calculate offset in the tile for the current thread. 
+ Value threadsPerWarp = i32_val(kThreadsPerWarp); + Value warpId = udiv(thread, threadsPerWarp); +- Value warpGroupId = udiv(warpId, i32_val(warpsPerCTA[1])); ++ Value warpGroupId; ++ if (mmaLayout.isHopper()) { ++ warpGroupId = urem(warpId, i32_val(warpsPerCTA[0])); ++ } else { ++ assert(mmaLayout.isAmpere()); ++ warpGroupId = udiv(warpId, i32_val(warpsPerCTA[1])); ++ } + Value laneId = urem(thread, threadsPerWarp); + Value laneGroupId = udiv(laneId, i32_val(kThreadsInGroup)); + Value columnId = urem(laneId, i32_val(shapePerCTATile[1])); diff --git a/third_party/xla/third_party/triton/xla_extensions/sparse_dot_nvgpu.patch b/third_party/xla/third_party/triton/xla_extensions/sparse_dot_nvgpu.patch index ced13cff33fd16..791618363b2f34 100644 --- a/third_party/xla/third_party/triton/xla_extensions/sparse_dot_nvgpu.patch +++ b/third_party/xla/third_party/triton/xla_extensions/sparse_dot_nvgpu.patch @@ -1,7 +1,8 @@ -diff --git a/include/triton/Dialect/NVGPU/IR/NVGPUOps.td b/include/triton/Dialect/NVGPU/IR/NVGPUOps.td ---- a/include/triton/Dialect/NVGPU/IR/NVGPUOps.td -+++ b/include/triton/Dialect/NVGPU/IR/NVGPUOps.td -@@ -87,6 +87,15 @@ def NVGPU_WGMMAOp : NVGPU_Op<"wgmma", [] +diff --git a/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td b/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td +index ca9d18873..d39bc6ec4 100644 +--- a/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td ++++ b/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td +@@ -87,6 +87,15 @@ def NVGPU_WGMMAOp : NVGPU_Op<"wgmma", []> { let assemblyFormat = "$opA `,` $opB (`,` $opC^)? attr-dict `:` functional-type(operands, $res)"; } @@ -18,9 +19,10 @@ diff --git a/include/triton/Dialect/NVGPU/IR/NVGPUOps.td b/include/triton/Dialec let arguments = (ins LLVM_AnyPointer:$addr, I32:$ctaId, I32Attr:$bitwidth, I32Attr:$vec); let builders = [ diff --git a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp b/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp +index e19216520..aacbfb569 100644 --- a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp +++ b/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp -@@ -688,6 +688,84 @@ public: +@@ -668,6 +668,84 @@ public: } }; @@ -31,7 +33,7 @@ diff --git a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp b/third_part + using Base::Base; + + std::vector getOutputConstraints(ttn::SparseWGMMAOp op) const { -+ auto outputStructType = op.getType().cast(); ++ auto outputStructType = cast(op.getType()); + uint32_t numOutputRegs = outputStructType.getBody().size(); + std::string output = + outputStructType.getBody().front().isF32() ? "=f" : "=r"; @@ -71,7 +73,7 @@ diff --git a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp b/third_part + + // Output and operand C + uint32_t numCRegs = -+ op.getType().cast().getBody().size(); ++ cast(op.getType()).getBody().size(); + args += "{"; + for (uint32_t i = 0; i < numCRegs; ++i) { + args += "$" + std::to_string(asmOpIdx++) + (i == numCRegs - 1 ? 
"" : ","); @@ -105,13 +107,17 @@ diff --git a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp b/third_part class ConvertNVGPUToLLVM : public ConvertNVGPUToLLVMBase { public: -@@ -711,7 +789,8 @@ public: +@@ -688,10 +766,9 @@ public: + patterns.add>( + context, Cluster_Cta_Id_Op, Constraints({"=r"}), Constraints()); - patterns.add(context); -+ WGMMAWaitGroupOpPattern, StoreDSmemOpPattern, -+ SparseWGMMAOpPattern>(context); +- patterns +- .add( +- context); ++ patterns.add(context); if (applyPatternsAndFoldGreedily(mod, std::move(patterns)).failed()) signalPassFailure(); diff --git a/third_party/xla/third_party/triton/xla_extensions/sparse_dot_passes.patch b/third_party/xla/third_party/triton/xla_extensions/sparse_dot_passes.patch index 74662cf3c90dc9..9136cb84b24254 100644 --- a/third_party/xla/third_party/triton/xla_extensions/sparse_dot_passes.patch +++ b/third_party/xla/third_party/triton/xla_extensions/sparse_dot_passes.patch @@ -1,7 +1,8 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp +index 4aa2712ec..16a6253d7 100644 --- a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp +++ b/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp -@@ -277,6 +277,89 @@ struct TritonDotPattern : public OpConve +@@ -279,6 +279,89 @@ struct TritonDotPattern : public OpConversionPattern { } }; @@ -12,7 +13,7 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co + LogicalResult matchAndRewrite( + triton::gpu::SparseDotOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { -+ RankedTensorType origType = op.getType().cast(); ++ RankedTensorType origType = cast(op.getType()); + auto origShape = origType.getShape(); + auto typeConverter = getTypeConverter(); + int numWarps = typeConverter->getNumWarps(); @@ -40,8 +41,8 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co + RankedTensorType::get(origShape, origType.getElementType(), dEncoding); + + // a & b must be of smem layout -+ auto aType = adaptor.getA().getType().cast(); -+ auto bType = adaptor.getB().getType().cast(); ++ auto aType = cast(adaptor.getA().getType()); ++ auto bType = cast(adaptor.getB().getType()); + Type aEltType = aType.getElementType(); + Type bEltType = bType.getElementType(); + Attribute aEncoding = aType.getEncoding(); @@ -51,14 +52,14 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co + Value a = adaptor.getA(); + Value b = adaptor.getB(); + Value c = adaptor.getC(); -+ if (!aEncoding.isa()) { ++ if (!isa(aEncoding)) { + Attribute encoding = triton::gpu::DotOperandEncodingAttr::get( + getContext(), 0, dEncoding, aEltType); + auto dstType = + RankedTensorType::get(aType.getShape(), aEltType, encoding); + a = rewriter.create(a.getLoc(), dstType, a); + } -+ if (!bEncoding.isa()) { ++ if (!isa(bEncoding)) { + Attribute encoding = triton::gpu::DotOperandEncodingAttr::get( + getContext(), 1, dEncoding, bEltType); + auto dstType = @@ -68,11 +69,11 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co + c = rewriter.create(c.getLoc(), retType, c); + + // aMeta must be of smem layout -+ auto aMetaType = adaptor.getAMeta().getType().cast(); ++ auto aMetaType = cast(adaptor.getAMeta().getType()); + Attribute aMetaEncoding = aMetaType.getEncoding(); + if (!aMetaEncoding) return failure(); + Value aMeta = adaptor.getAMeta(); -+ if (!aMetaEncoding.isa()) { ++ if (!isa(aMetaEncoding)) { + 
Attribute encoding = + triton::gpu::SparseDotMetaEncodingAttr::get(getContext(), dEncoding); + auto dstType = RankedTensorType::get( @@ -91,17 +92,17 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co struct TritonCatPattern : public OpConversionPattern { using OpConversionPattern::OpConversionPattern; -@@ -550,6 +633,7 @@ void populateTritonPatterns(TritonGPUTyp - GenericOpPattern, GenericOpPattern, +@@ -553,6 +636,7 @@ void populateTritonPatterns(TritonGPUTypeConverter &typeConverter, + GenericOpPattern, GenericOpPattern, TritonFuncOpPattern>(typeConverter, context); + patterns.insert(typeConverter, context); } // -@@ -788,6 +872,12 @@ public: - IntegerAttr::get( - i32_ty, llvm::APInt(32, computeCapability.getValue()))); +@@ -794,6 +878,12 @@ public: + mod->setAttr(AttrTargetName, + StringAttr::get(context, this->target.getValue())); + // Only transform sparse dot op with undefined layout. + target.addDynamicallyLegalOp( @@ -113,9 +114,10 @@ diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Co return signalPassFailure(); diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp +index 098ee85e4..0516fc56f 100644 --- a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp +++ b/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp -@@ -42,8 +42,9 @@ static int getMMAVersionSafe(int compute +@@ -44,8 +44,9 @@ static int getMMAVersionSafe(int computeCapability, tt::DotOp op) { return 0; } @@ -126,7 +128,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect auto rank = shape.size(); // Early exit for batched matmul if (rank == 3) -@@ -56,14 +57,14 @@ warpsPerTileV2(tt::DotOp dotOp, const Ar +@@ -58,8 +59,8 @@ warpsPerTileV2(tt::DotOp dotOp, const ArrayRef shape, int numWarps) { auto slices = multiRootGetSlice(dotOp, {filter}, {filter}); bool hasChainedDot = false; for (Operation *op : slices) { @@ -137,14 +139,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect auto resTy = chainedDot.getResult().getType(); if (resTy.getRank() != rank) { continue; - } - if (auto mmaEncoding = -- resTy.getEncoding().dyn_cast()) { -+ resTy.getEncoding().template dyn_cast()) { - return ttg::getWarpsPerCTA(mmaEncoding); - } - hasChainedDot = true; -@@ -101,12 +102,13 @@ warpsPerTileV2(tt::DotOp dotOp, const Ar +@@ -103,12 +104,13 @@ warpsPerTileV2(tt::DotOp dotOp, const ArrayRef shape, int numWarps) { return ret; } @@ -162,7 +157,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect slices.end()) return {(unsigned)numWarps, 1}; -@@ -175,9 +177,10 @@ public: +@@ -178,9 +180,10 @@ public: : mlir::RewritePattern(tt::DotOp::getOperationName(), 2, context), computeCapability(computeCapability) {} @@ -176,7 +171,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect switch (version) { case 2: return warpsPerTileV2(dotOp, shape, numWarps); -@@ -337,6 +340,98 @@ public: +@@ -335,6 +338,98 @@ public: return success(); } }; @@ -201,7 +196,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + // Check data-types and SM compatibility + RankedTensorType oldRetType = dotOp.getType(); + if (!oldRetType.getEncoding() || -+ oldRetType.getEncoding().isa()) ++ isa(oldRetType.getEncoding())) + return failure(); + + assert(computeCapability >= 80 && @@ -216,7 +211,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect 
+ + auto instrShape = + mmaVersionToInstrShape(versionMajor, retShapePerCTA, -+ a.getType().cast(), numWarps); ++ cast(a.getType()), numWarps); + auto warpsPerTile = BlockedToMMA::getWarpsPerTile( + dotOp, retShapePerCTA, versionMajor, numWarps, instrShape); + ttg::NvidiaMmaEncodingAttr mmaEnc = @@ -232,7 +227,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + + if (versionMajor == 2) { + // convert A operand -+ auto oldAType = a.getType().cast(); ++ auto oldAType = cast(a.getType()); + auto newAEncoding = ttg::DotOperandEncodingAttr::get( + ctx, 0, mmaEnc, oldAType.getElementType()); + auto newAType = RankedTensorType::get( @@ -240,7 +235,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + a = rewriter.create(a.getLoc(), newAType, a); + + // convert B operand -+ auto oldBType = b.getType().cast(); ++ auto oldBType = cast(b.getType()); + auto newBEncoding = ttg::DotOperandEncodingAttr::get( + ctx, 1, mmaEnc, oldBType.getElementType()); + auto newBType = RankedTensorType::get( @@ -253,7 +248,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect + + // convert metadata + Value meta = dotOp.getAMeta(); -+ auto oldMetaType = meta.getType().cast(); ++ auto oldMetaType = cast(meta.getType()); + auto newMetaType = RankedTensorType::get( + oldMetaType.getShape(), oldMetaType.getElementType(), + SparseDotMetaEncodingAttr::get(ctx, mmaEnc)); @@ -275,7 +270,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect } // namespace static Value promoteOperand(OpBuilder &builder, Location loc, Value operand, -@@ -397,6 +491,7 @@ public: +@@ -394,6 +489,7 @@ public: mlir::RewritePatternSet patterns(context); patterns.add<::BlockedToMMA>(context, computeCapability); @@ -284,33 +279,31 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp b/lib/Dialect signalPassFailure(); } diff --git a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp b/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp +index 97ca6a840..f0ef124ff 100644 --- a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp +++ b/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp -@@ -47,6 +47,10 @@ struct PipelinedOpInfo { - bool loadIsMMAV3 = false; +@@ -188,6 +188,10 @@ public: + } }; -+bool isDotOp(Operation* op) { ++static bool isDotOp(Operation* op) { + return isa(op); +} + - } // namespace - static bool isMMAv3Dot(Operation *op) { -@@ -165,22 +169,28 @@ getSharedEncIfAllUsersAreDotEnc(Value val) { + auto dot = dyn_cast(op); + if (!dot) +@@ -399,19 +403,28 @@ getSharedEncIfAllUsersAreDotEnc(Value val) { } else { if (!isa(user)) return std::nullopt; -- auto dotOpEnc = user->getResult(0) -- .getType() -- .cast() -- .getEncoding() -- .dyn_cast(); +- auto dotOpEnc = dyn_cast( +- cast(user->getResult(0).getType()).getEncoding()); - if (!dotOpEnc) + auto enc = -+ user->getResult(0).getType().cast().getEncoding(); ++ cast(user->getResult(0).getType()).getEncoding(); + if (isa(enc)) { -+ auto srcTy = val.getType().cast(); ++ auto srcTy = cast(val.getType()); + auto CTALayout = ttg::getCTALayout(srcTy.getEncoding()); + auto order = ttg::getOrder(srcTy.getEncoding()); + unsigned bitWidth = srcTy.getElementType().getIntOrFloatBitWidth(); @@ -321,14 +314,14 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp b + srcTy.getElementType().getIntOrFloatBitWidth(), + /*needTrans=*/false); + } else if (isa(enc)) { -+ auto srcTy = 
val.getType().cast(); ++ auto srcTy = cast(val.getType()); + tempAttr = ttg::SharedEncodingAttr::get( + val.getContext(), /*vec=*/1, /*perPhase=*/1, /*maxPhase=*/1, + ttg::getOrder(srcTy.getEncoding()), + ttg::getCTALayout(srcTy.getEncoding())); + } else { return std::nullopt; -- auto srcTy = val.getType().cast(); +- auto srcTy = cast(val.getType()); - auto CTALayout = ttg::getCTALayout(srcTy.getEncoding()); - auto order = ttg::getOrder(srcTy.getEncoding()); - unsigned bitWidth = srcTy.getElementType().getIntOrFloatBitWidth(); @@ -341,71 +334,63 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/Pipeliner/MatmulLoopPipeline.cpp b } // Check that the shared encodings needed by the users are compatible. if (!tempAttr || (attr != nullptr && attr != tempAttr)) -@@ -313,7 +323,7 @@ loadOpsToDistanceAndUse(scf::ForOp forOp) { +@@ -518,7 +531,7 @@ loadOpsToIndirectionLevelAndUse(scf::ForOp forOp) { }; for (Operation &op : forOp.getBody()->without_terminator()) { - if (!isa(op)) + if (!isDotOp(&op)) continue; + seen.clear(); dfs(&op, 0, &op); - } -@@ -391,7 +401,8 @@ collectOpsToPipeline(scf::ForOp forOp, - // loads. - for (auto &[loadOp, distAndUse] : loadOpToDistAndUse) { - PipelinedOpInfo loadInfo; -- if (auto dot = dyn_cast(distAndUse.second)) { -+ if (isDotOp(distAndUse.second)) { -+ auto dot = dyn_cast(distAndUse.second); - if (loadIsMMAv3(loadOp)) { +@@ -595,7 +608,8 @@ assignMemoryLayouts(llvm::SmallVector> + continue; + } + +- if (auto dot = dyn_cast(use)) { ++ if (isDotOp(use)) { ++ auto dot = dyn_cast(use); + loadInfo.usedByDot = true; + if (loadIsMMAv3(op)) { loadInfo.loadIsMMAV3 = true; - loadInfo.sharedEncoding = -@@ -410,7 +421,7 @@ collectOpsToPipeline(scf::ForOp forOp, +@@ -614,7 +628,7 @@ assignMemoryLayouts(llvm::SmallVector> // The codegen bug is caught by an assertion, so if you think you've // fixed it, feel free to delete this code and see if the assert still // fails. :) - if (!loadInfo.sharedEncoding) { + if (dot && !loadInfo.sharedEncoding) { - if (auto dotEnc = dot.getResult() - .getType() - .getEncoding() -@@ -788,7 +799,7 @@ bool mlir::triton::preProcessLoopAndGetSchedule( - int useStage = opToInfo[info.use].stage; - int numBuffers = useStage - defStage; - -- if (hasMMAV3 && isa(info.use)) { -+ if (hasMMAV3 && isDotOp(info.use)) { - // For MMAv3, we need an extra buffer as this is assumed in the wgmma - // pipelining post-processing. 
- numBuffers++; + if (auto dotEnc = dyn_cast( + dot.getResult().getType().getEncoding())) { + auto loadTy = cast(op->getResultTypes()[0]); diff --git a/lib/Dialect/TritonGPU/Transforms/ReduceDataDuplication.cpp b/lib/Dialect/TritonGPU/Transforms/ReduceDataDuplication.cpp +index 2211df31b..ee5ff44d8 100644 --- a/lib/Dialect/TritonGPU/Transforms/ReduceDataDuplication.cpp +++ b/lib/Dialect/TritonGPU/Transforms/ReduceDataDuplication.cpp -@@ -36,6 +36,10 @@ public: +@@ -37,6 +37,10 @@ public: auto srcEncoding = srcType.getEncoding(); - if (srcEncoding.isa()) + if (isa(srcEncoding)) return; -+ if (dstType.getEncoding().isa()) { ++ if (isa(dstType.getEncoding())) { + replaceSparseMetaEncoding(cvtOp); + return; + } auto dstDotOp = - dstType.getEncoding().dyn_cast(); + dyn_cast(dstType.getEncoding()); if (!dstDotOp) -@@ -74,6 +78,27 @@ public: +@@ -83,6 +87,27 @@ public: cvtOp.erase(); }); } + + private: + void replaceSparseMetaEncoding(triton::gpu::ConvertLayoutOp cvtOp) { -+ auto srcType = cvtOp.getOperand().getType().cast(); ++ auto srcType = cast(cvtOp.getOperand().getType()); + auto srcEncoding = srcType.getEncoding(); + auto sharedLayout = triton::gpu::SharedEncodingAttr::get( + cvtOp.getContext(), 8, 1, 1, triton::gpu::getOrder(srcEncoding), + triton::gpu::getCTALayout(srcEncoding)); + -+ auto dstType = cvtOp.getType().cast(); ++ auto dstType = cast(cvtOp.getType()); + auto tmpType = triton::MemDescType::get( + dstType.getShape(), dstType.getElementType(), sharedLayout); + @@ -421,6 +406,7 @@ diff --git a/lib/Dialect/TritonGPU/Transforms/ReduceDataDuplication.cpp b/lib/Di std::unique_ptr mlir::triton::gpu::createReduceDataDuplicationPass() { diff --git a/lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp b/lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp +index f456d36a6..a1dac2b72 100644 --- a/lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp +++ b/lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp @@ -45,7 +45,7 @@ public: @@ -432,7 +418,7 @@ diff --git a/lib/Dialect/TritonNvidiaGPU/Transforms/FenceInsertion.cpp b/lib/Dia return WalkResult::advance(); OpBuilder builder(op); auto a = op->getOperand(0); -@@ -83,7 +83,7 @@ private: +@@ -80,7 +80,7 @@ private: static DenseSet> trace; auto op = operand.getDefiningOp(); // avoid redundant insertion diff --git a/third_party/xla/third_party/tsl/WORKSPACE b/third_party/xla/third_party/tsl/WORKSPACE index 6ad0d6e0e2b7be..19350e3dbba762 100644 --- a/third_party/xla/third_party/tsl/WORKSPACE +++ b/third_party/xla/third_party/tsl/WORKSPACE @@ -10,6 +10,31 @@ workspace(name = "tsl") # buildifier: disable=load-on-top +# Initialize hermetic Python +load("//third_party/py:python_init_rules.bzl", "python_init_rules") + +python_init_rules() + +load("//third_party/py:python_init_repositories.bzl", "python_init_repositories") + +python_init_repositories( + requirements = { + "3.11": "//:requirements_lock_3_11.txt", + }, +) + +load("//third_party/py:python_init_toolchains.bzl", "python_init_toolchains") + +python_init_toolchains() + +load("//third_party/py:python_init_pip.bzl", "python_init_pip") + +python_init_pip() + +load("@pypi//:requirements.bzl", "install_deps") + +install_deps() + load(":workspace3.bzl", "tsl_workspace3") tsl_workspace3() diff --git a/third_party/xla/third_party/tsl/opensource_only.files b/third_party/xla/third_party/tsl/opensource_only.files index 3f2bcf8431edc0..709cacf5c1756e 100644 --- a/third_party/xla/third_party/tsl/opensource_only.files +++ b/third_party/xla/third_party/tsl/opensource_only.files @@ 
-73,13 +73,17 @@ third_party/nccl/system.BUILD.tpl: third_party/nvtx/BUILD: third_party/nvtx/LICENSE: third_party/protobuf/BUILD: -third_party/py/non_hermetic/BUILD.tpl: -third_party/py/non_hermetic/BUILD: -third_party/py/non_hermetic/README: -third_party/py/non_hermetic/ml_dtypes/BUILD: -third_party/py/non_hermetic/ml_dtypes/LICENSE: -third_party/py/non_hermetic/numpy/BUILD: -third_party/py/non_hermetic/python_configure.bzl: +third_party/py/BUILD.tpl: +third_party/py/BUILD: +third_party/py/ml_dtypes/BUILD: +third_party/py/ml_dtypes/LICENSE: +third_party/py/numpy/BUILD: +third_party/py/python_configure.bzl: +third_party/py/python_init_pip.bzl: +third_party/py/python_init_repositories.bzl: +third_party/py/python_init_rules.bzl: +third_party/py/python_init_toolchains.bzl: +third_party/py/python_repo.bzl: third_party/pybind11.BUILD: third_party/pybind11_bazel/BUILD: third_party/python_runtime/BUILD: diff --git a/third_party/xla/third_party/tsl/requirements_lock_3_11.txt b/third_party/xla/third_party/tsl/requirements_lock_3_11.txt new file mode 100644 index 00000000000000..5c4bb687dfecae --- /dev/null +++ b/third_party/xla/third_party/tsl/requirements_lock_3_11.txt @@ -0,0 +1,49 @@ +numpy==1.24.3 \ + --hash=sha256:0ec87a7084caa559c36e0a2309e4ecb1baa03b687201d0a847c8b0ed476a7187 \ + --hash=sha256:1a7d6acc2e7524c9955e5c903160aa4ea083736fde7e91276b0e5d98e6332812 \ + --hash=sha256:202de8f38fc4a45a3eea4b63e2f376e5f2dc64ef0fa692838e31a808520efaf7 \ + --hash=sha256:210461d87fb02a84ef243cac5e814aad2b7f4be953b32cb53327bb49fd77fbb4 \ + --hash=sha256:2d926b52ba1367f9acb76b0df6ed21f0b16a1ad87c6720a1121674e5cf63e2b6 \ + --hash=sha256:352ee00c7f8387b44d19f4cada524586f07379c0d49270f87233983bc5087ca0 \ + --hash=sha256:35400e6a8d102fd07c71ed7dcadd9eb62ee9a6e84ec159bd48c28235bbb0f8e4 \ + --hash=sha256:3c1104d3c036fb81ab923f507536daedc718d0ad5a8707c6061cdfd6d184e570 \ + --hash=sha256:4719d5aefb5189f50887773699eaf94e7d1e02bf36c1a9d353d9f46703758ca4 \ + --hash=sha256:4749e053a29364d3452c034827102ee100986903263e89884922ef01a0a6fd2f \ + --hash=sha256:5342cf6aad47943286afa6f1609cad9b4266a05e7f2ec408e2cf7aea7ff69d80 \ + --hash=sha256:56e48aec79ae238f6e4395886b5eaed058abb7231fb3361ddd7bfdf4eed54289 \ + --hash=sha256:76e3f4e85fc5d4fd311f6e9b794d0c00e7002ec122be271f2019d63376f1d385 \ + --hash=sha256:7776ea65423ca6a15255ba1872d82d207bd1e09f6d0894ee4a64678dd2204078 \ + --hash=sha256:784c6da1a07818491b0ffd63c6bbe5a33deaa0e25a20e1b3ea20cf0e43f8046c \ + --hash=sha256:8535303847b89aa6b0f00aa1dc62867b5a32923e4d1681a35b5eef2d9591a463 \ + --hash=sha256:9a7721ec204d3a237225db3e194c25268faf92e19338a35f3a224469cb6039a3 \ + --hash=sha256:a1d3c026f57ceaad42f8231305d4653d5f05dc6332a730ae5c0bea3513de0950 \ + --hash=sha256:ab344f1bf21f140adab8e47fdbc7c35a477dc01408791f8ba00d018dd0bc5155 \ + --hash=sha256:ab5f23af8c16022663a652d3b25dcdc272ac3f83c3af4c02eb8b824e6b3ab9d7 \ + --hash=sha256:ae8d0be48d1b6ed82588934aaaa179875e7dc4f3d84da18d7eae6eb3f06c242c \ + --hash=sha256:c91c4afd8abc3908e00a44b2672718905b8611503f7ff87390cc0ac3423fb096 \ + --hash=sha256:d5036197ecae68d7f491fcdb4df90082b0d4960ca6599ba2659957aafced7c17 \ + --hash=sha256:d6cc757de514c00b24ae8cf5c876af2a7c3df189028d68c0cb4eaa9cd5afc2bf \ + --hash=sha256:d933fabd8f6a319e8530d0de4fcc2e6a61917e0b0c271fded460032db42a0fe4 \ + --hash=sha256:ea8282b9bcfe2b5e7d491d0bf7f3e2da29700cec05b49e64d6246923329f2b02 \ + --hash=sha256:ecde0f8adef7dfdec993fd54b0f78183051b6580f606111a6d789cd14c61ea0c \ + --hash=sha256:f21c442fdd2805e91799fbe044a7b999b8571bb0ab0f7850d0cb9641a687092b +lit==17.0.6 \ + 
--hash=sha256:dfa9af9b55fc4509a56be7bf2346f079d7f4a242d583b9f2e0b078fd0abae31b +ml-dtypes==0.3.2 \ + --hash=sha256:2c34f2ba9660b21fe1034b608308a01be82bbef2a92fb8199f24dc6bad0d5226 \ + --hash=sha256:3a17ef2322e60858d93584e9c52a5be7dd6236b056b7fa1ec57f1bb6ba043e33 \ + --hash=sha256:533059bc5f1764fac071ef54598db358c167c51a718f68f5bb55e3dee79d2967 \ + --hash=sha256:6604877d567a29bfe7cc02969ae0f2425260e5335505cf5e7fefc3e5465f5655 \ + --hash=sha256:6b35c4e8ca957c877ac35c79ffa77724ecc3702a1e4b18b08306c03feae597bb \ + --hash=sha256:763697ab8a88d47443997a7cdf3aac7340049aed45f7521f6b0ec8a0594821fe \ + --hash=sha256:7a4c3fcbf86fa52d0204f07cfd23947ef05b4ad743a1a988e163caa34a201e5e \ + --hash=sha256:7afde548890a92b41c0fed3a6c525f1200a5727205f73dc21181a2726571bb53 \ + --hash=sha256:7ba8e1fafc7fff3e643f453bffa7d082df1678a73286ce8187d3e825e776eb94 \ + --hash=sha256:91f8783fd1f2c23fd3b9ee5ad66b785dafa58ba3cdb050c4458021fa4d1eb226 \ + --hash=sha256:93b78f53431c93953f7850bb1b925a17f0ab5d97527e38a7e865b5b4bc5cfc18 \ + --hash=sha256:961134ea44c7b8ca63eda902a44b58cd8bd670e21d62e255c81fba0a8e70d9b7 \ + --hash=sha256:b89b194e9501a92d289c1ffd411380baf5daafb9818109a4f49b0a1b6dce4462 \ + --hash=sha256:c7b3fb3d4f6b39bcd4f6c4b98f406291f0d681a895490ee29a0f95bab850d53c \ + --hash=sha256:d1a746fe5fb9cd974a91070174258f0be129c592b93f9ce7df6cc336416c3fbd \ + --hash=sha256:e8505946df1665db01332d885c2020b4cb9e84a8b1241eb4ba69d59591f65855 \ + --hash=sha256:f47619d978ab1ae7dfdc4052ea97c636c6263e1f19bd1be0e42c346b98d15ff4 \ No newline at end of file diff --git a/third_party/xla/third_party/tsl/third_party/mkl_dnn/mkldnn_v1.BUILD b/third_party/xla/third_party/tsl/third_party/mkl_dnn/mkldnn_v1.BUILD index cc9e66d77e3a77..d36831ea4b9f53 100644 --- a/third_party/xla/third_party/tsl/third_party/mkl_dnn/mkldnn_v1.BUILD +++ b/third_party/xla/third_party/tsl/third_party/mkl_dnn/mkldnn_v1.BUILD @@ -94,8 +94,8 @@ expand_template( out = "include/oneapi/dnnl/dnnl_version.h", substitutions = { "@DNNL_VERSION_MAJOR@": "3", - "@DNNL_VERSION_MINOR@": "3", - "@DNNL_VERSION_PATCH@": "4", + "@DNNL_VERSION_MINOR@": "4", + "@DNNL_VERSION_PATCH@": "1", "@DNNL_VERSION_HASH@": "N/A", }, template = "include/oneapi/dnnl/dnnl_version.h.in", diff --git a/third_party/xla/third_party/tsl/third_party/py/BUILD b/third_party/xla/third_party/tsl/third_party/py/BUILD index e69de29bb2d1d6..84eba77ce1a7af 100644 --- a/third_party/xla/third_party/tsl/third_party/py/BUILD +++ b/third_party/xla/third_party/tsl/third_party/py/BUILD @@ -0,0 +1,40 @@ +load("@python//:defs.bzl", "compile_pip_requirements") +load("@python_version_repo//:py_version.bzl", "REQUIREMENTS") + +compile_pip_requirements( + name = "requirements", + extra_args = [ + "--allow-unsafe", + "--build-isolation", + ], + generate_hashes = True, + requirements_in = "requirements.in", + requirements_txt = REQUIREMENTS, +) + +compile_pip_requirements( + name = "requirements_nightly", + data = ["test-requirements.txt"], + extra_args = [ + "--allow-unsafe", + "--build-isolation", + "--extra-index-url=https://pypi.anaconda.org/scientific-python-nightly-wheels/simple", + "--pre", + "--upgrade", + ], + generate_hashes = False, + requirements_in = "requirements.in", + requirements_txt = REQUIREMENTS, +) + +compile_pip_requirements( + name = "requirements_dev", + extra_args = [ + "--allow-unsafe", + "--build-isolation", + "--upgrade", + ], + generate_hashes = False, + requirements_in = "requirements.in", + requirements_txt = REQUIREMENTS, +) diff --git 
a/third_party/xla/third_party/tsl/third_party/py/non_hermetic/BUILD b/third_party/xla/third_party/tsl/third_party/py/non_hermetic/BUILD deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/third_party/xla/third_party/tsl/third_party/py/non_hermetic/BUILD.tpl b/third_party/xla/third_party/tsl/third_party/py/non_hermetic/BUILD.tpl deleted file mode 100644 index 45480bd4a31cf8..00000000000000 --- a/third_party/xla/third_party/tsl/third_party/py/non_hermetic/BUILD.tpl +++ /dev/null @@ -1,80 +0,0 @@ -licenses(["restricted"]) - -package(default_visibility = ["//visibility:public"]) - -# Point both runtimes to the same python binary to ensure we always -# use the python binary specified by ./configure.py script. -load("@bazel_tools//tools/python:toolchain.bzl", "py_runtime_pair") - -py_runtime( - name = "py2_runtime", - interpreter_path = "%{PYTHON_BIN_PATH}", - python_version = "PY2", -) - -py_runtime( - name = "py3_runtime", - interpreter_path = "%{PYTHON_BIN_PATH}", - python_version = "PY3", -) - -py_runtime_pair( - name = "py_runtime_pair", - py2_runtime = ":py2_runtime", - py3_runtime = ":py3_runtime", -) - -toolchain( - name = "py_toolchain", - toolchain = ":py_runtime_pair", - toolchain_type = "@bazel_tools//tools/python:toolchain_type", - target_compatible_with = [%{PLATFORM_CONSTRAINT}], - exec_compatible_with = [%{PLATFORM_CONSTRAINT}], -) - -# To build Python C/C++ extension on Windows, we need to link to python import library pythonXY.lib -# See https://docs.python.org/3/extending/windows.html -cc_import( - name = "python_lib", - interface_library = select({ - ":windows": ":python_import_lib", - # A placeholder for Unix platforms which makes --no_build happy. - "//conditions:default": "not-existing.lib", - }), - system_provided = 1, -) - -cc_library( - name = "python_headers", - hdrs = [":python_include"], - deps = select({ - ":windows": [":python_lib"], - "//conditions:default": [], - }), - includes = ["python_include"], -) - -# This alias is exists for the use of targets in the @llvm-project dependency, -# which expect a python_headers target called @python_runtime//:headers. We use -# a repo_mapping to alias python_runtime to this package, and an alias to create -# the correct target. -alias( - name = "headers", - actual = ":python_headers", -) - -cc_library( - name = "numpy_headers", - hdrs = [":numpy_include"], - includes = ["numpy_include"], -) - -config_setting( - name = "windows", - values = {"cpu": "x64_windows"}, - visibility = ["//visibility:public"], -) - -%{PYTHON_INCLUDE_GENRULE} -%{NUMPY_INCLUDE_GENRULE} -%{PYTHON_IMPORT_LIB_GENRULE} \ No newline at end of file diff --git a/third_party/xla/third_party/tsl/third_party/py/non_hermetic/README b/third_party/xla/third_party/tsl/third_party/py/non_hermetic/README deleted file mode 100644 index 62188a5817a09e..00000000000000 --- a/third_party/xla/third_party/tsl/third_party/py/non_hermetic/README +++ /dev/null @@ -1,3 +0,0 @@ -This is a temporary copy of python_configure repository rule. It allows -XLA and TSL to keep non-hermetic Python while TF uses hermetic Python. -DO NOT DEPEND ON THIS COPY as it will be deleted soon. 
\ No newline at end of file diff --git a/third_party/xla/third_party/tsl/third_party/py/non_hermetic/python_configure.bzl b/third_party/xla/third_party/tsl/third_party/py/non_hermetic/python_configure.bzl deleted file mode 100644 index 89732c3e33d8ee..00000000000000 --- a/third_party/xla/third_party/tsl/third_party/py/non_hermetic/python_configure.bzl +++ /dev/null @@ -1,312 +0,0 @@ -"""Repository rule for Python autoconfiguration. - -`python_configure` depends on the following environment variables: - - * `PYTHON_BIN_PATH`: location of python binary. - * `PYTHON_LIB_PATH`: Location of python libraries. -""" - -load( - "//third_party/remote_config:common.bzl", - "BAZEL_SH", - "PYTHON_BIN_PATH", - "PYTHON_LIB_PATH", - "TF_PYTHON_CONFIG_REPO", - "auto_config_fail", - "config_repo_label", - "execute", - "get_bash_bin", - "get_host_environ", - "get_python_bin", - "is_windows", - "raw_exec", - "read_dir", -) - -def _genrule(src_dir, genrule_name, command, outs): - """Returns a string with a genrule. - - Genrule executes the given command and produces the given outputs. - """ - return ( - "genrule(\n" + - ' name = "' + - genrule_name + '",\n' + - " outs = [\n" + - outs + - "\n ],\n" + - ' cmd = """\n' + - command + - '\n """,\n' + - ")\n" - ) - -def _norm_path(path): - """Returns a path with '/' and remove the trailing slash.""" - path = path.replace("\\", "/") - if path[-1] == "/": - path = path[:-1] - return path - -def _symlink_genrule_for_dir( - repository_ctx, - src_dir, - dest_dir, - genrule_name, - src_files = [], - dest_files = []): - """Returns a genrule to symlink(or copy if on Windows) a set of files. - - If src_dir is passed, files will be read from the given directory; otherwise - we assume files are in src_files and dest_files - """ - if src_dir != None: - src_dir = _norm_path(src_dir) - dest_dir = _norm_path(dest_dir) - files = "\n".join(read_dir(repository_ctx, src_dir)) - - # Create a list with the src_dir stripped to use for outputs. - dest_files = files.replace(src_dir, "").splitlines() - src_files = files.splitlines() - command = [] - outs = [] - for i in range(len(dest_files)): - if dest_files[i] != "": - # If we have only one file to link we do not want to use the dest_dir, as - # $(@D) will include the full path to the file. - dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i] - - # Copy the headers to create a sandboxable setup. - cmd = "cp -f" - command.append(cmd + ' "%s" "%s"' % (src_files[i], dest)) - outs.append(' "' + dest_dir + dest_files[i] + '",') - genrule = _genrule( - src_dir, - genrule_name, - " && ".join(command), - "\n".join(outs), - ) - return genrule - -def _get_python_lib(repository_ctx, python_bin): - """Gets the python lib path.""" - python_lib = get_host_environ(repository_ctx, PYTHON_LIB_PATH) - if python_lib != None: - return python_lib - - # The interesting program to execute. 
- print_lib = [ - "from __future__ import print_function", - "import site", - "import os", - "python_paths = []", - "if os.getenv('PYTHONPATH') is not None:", - " python_paths = os.getenv('PYTHONPATH').split(':')", - "try:", - " library_paths = site.getsitepackages()", - "except AttributeError:", - " from distutils.sysconfig import get_python_lib", - " library_paths = [get_python_lib()]", - "all_paths = set(python_paths + library_paths)", - "paths = []", - "for path in all_paths:", - " if os.path.isdir(path):", - " paths.append(path)", - "if len(paths) >=1:", - " print(paths[0])", - ] - - # The below script writes the above program to a file - # and executes it. This is to work around the limitation - # of not being able to upload files as part of execute. - cmd = "from os import linesep;" - cmd += "f = open('script.py', 'w');" - for line in print_lib: - cmd += "f.write(\"%s\" + linesep);" % line - cmd += "f.close();" - cmd += "from subprocess import call;" - cmd += "call([\"%s\", \"script.py\"]);" % python_bin - - result = execute(repository_ctx, [python_bin, "-c", cmd]) - return result.stdout.strip() - -def _check_python_lib(repository_ctx, python_lib): - """Checks the python lib path.""" - cmd = 'test -d "%s" -a -x "%s"' % (python_lib, python_lib) - result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd]) - if result.return_code == 1: - auto_config_fail("Invalid python library path: %s" % python_lib) - -def _check_python_bin(repository_ctx, python_bin): - """Checks the python bin path.""" - cmd = '[[ -x "%s" ]] && [[ ! -d "%s" ]]' % (python_bin, python_bin) - result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd]) - if result.return_code == 1: - auto_config_fail("--define %s='%s' is not executable. Is it the python binary?" % ( - PYTHON_BIN_PATH, - python_bin, - )) - -def _get_python_include(repository_ctx, python_bin): - """Gets the python include path.""" - result = execute( - repository_ctx, - [ - python_bin, - "-Wignore", - "-c", - "import sysconfig; " + - "print(sysconfig.get_path('include'))", - ], - error_msg = "Problem getting python include path.", - error_details = ("Is the Python binary path set up right? " + - "(See ./configure or " + PYTHON_BIN_PATH + ".) " + - "Is distutils installed?"), - ) - return result.stdout.splitlines()[0] - -def _get_python_import_lib_name(repository_ctx, python_bin): - """Get Python import library name (pythonXY.lib) on Windows.""" - result = execute( - repository_ctx, - [ - python_bin, - "-c", - "import sys;" + - 'print("python" + str(sys.version_info[0]) + ' + - ' str(sys.version_info[1]) + ".lib")', - ], - error_msg = "Problem getting python import library.", - error_details = ("Is the Python binary path set up right? " + - "(See ./configure or " + PYTHON_BIN_PATH + ".) "), - ) - return result.stdout.splitlines()[0] - -def _get_numpy_include(repository_ctx, python_bin): - """Gets the numpy include path.""" - return execute( - repository_ctx, - [ - python_bin, - "-c", - "from __future__ import print_function;" + - "import numpy;" + - " print(numpy.get_include());", - ], - error_msg = "Problem getting numpy include path.", - error_details = "Is numpy installed?", - ).stdout.splitlines()[0] - -def _create_local_python_repository(repository_ctx): - """Creates the repository containing files set up to build with Python.""" - - # Resolve all labels before doing any real work. Resolving causes the - # function to be restarted with all previous state being lost. 
This - # can easily lead to a O(n^2) runtime in the number of labels. - build_tpl = repository_ctx.path(Label("//third_party/py:BUILD.tpl")) - - python_bin = get_python_bin(repository_ctx) - _check_python_bin(repository_ctx, python_bin) - python_lib = _get_python_lib(repository_ctx, python_bin) - _check_python_lib(repository_ctx, python_lib) - python_include = _get_python_include(repository_ctx, python_bin) - numpy_include = _get_numpy_include(repository_ctx, python_bin) + "/numpy" - python_include_rule = _symlink_genrule_for_dir( - repository_ctx, - python_include, - "python_include", - "python_include", - ) - python_import_lib_genrule = "" - - # To build Python C/C++ extension on Windows, we need to link to python import library pythonXY.lib - # See https://docs.python.org/3/extending/windows.html - if is_windows(repository_ctx): - python_bin = python_bin.replace("\\", "/") - python_include = _norm_path(python_include) - python_import_lib_name = _get_python_import_lib_name(repository_ctx, python_bin) - python_import_lib_src = python_include.rsplit("/", 1)[0] + "/libs/" + python_import_lib_name - python_import_lib_genrule = _symlink_genrule_for_dir( - repository_ctx, - None, - "", - "python_import_lib", - [python_import_lib_src], - [python_import_lib_name], - ) - numpy_include_rule = _symlink_genrule_for_dir( - repository_ctx, - numpy_include, - "numpy_include/numpy", - "numpy_include", - ) - - platform_constraint = "" - if repository_ctx.attr.platform_constraint: - platform_constraint = "\"%s\"" % repository_ctx.attr.platform_constraint - repository_ctx.template("BUILD", build_tpl, { - "%{PYTHON_BIN_PATH}": python_bin, - "%{PYTHON_INCLUDE_GENRULE}": python_include_rule, - "%{PYTHON_IMPORT_LIB_GENRULE}": python_import_lib_genrule, - "%{NUMPY_INCLUDE_GENRULE}": numpy_include_rule, - "%{PLATFORM_CONSTRAINT}": platform_constraint, - }) - -def _create_remote_python_repository(repository_ctx, remote_config_repo): - """Creates pointers to a remotely configured repo set up to build with Python. - """ - repository_ctx.template("BUILD", config_repo_label(remote_config_repo, ":BUILD"), {}) - -def _python_autoconf_impl(repository_ctx): - """Implementation of the python_autoconf repository rule.""" - if get_host_environ(repository_ctx, TF_PYTHON_CONFIG_REPO) != None: - _create_remote_python_repository( - repository_ctx, - get_host_environ(repository_ctx, TF_PYTHON_CONFIG_REPO), - ) - else: - _create_local_python_repository(repository_ctx) - -_ENVIRONS = [ - BAZEL_SH, - PYTHON_BIN_PATH, - PYTHON_LIB_PATH, -] - -local_python_configure = repository_rule( - implementation = _create_local_python_repository, - environ = _ENVIRONS, - attrs = { - "environ": attr.string_dict(), - "platform_constraint": attr.string(), - }, -) - -remote_python_configure = repository_rule( - implementation = _create_local_python_repository, - environ = _ENVIRONS, - remotable = True, - attrs = { - "environ": attr.string_dict(), - "platform_constraint": attr.string(), - }, -) - -python_configure = repository_rule( - implementation = _python_autoconf_impl, - environ = _ENVIRONS + [TF_PYTHON_CONFIG_REPO], - attrs = { - "platform_constraint": attr.string(), - }, -) -"""Detects and configures the local Python. - -Add the following to your WORKSPACE FILE: - -```python -python_configure(name = "local_config_python") -``` - -Args: - name: A unique name for this workspace rule. 
-""" diff --git a/third_party/xla/third_party/tsl/third_party/py/python_init_pip.bzl b/third_party/xla/third_party/tsl/third_party/py/python_init_pip.bzl new file mode 100644 index 00000000000000..efc2bf8233cf61 --- /dev/null +++ b/third_party/xla/third_party/tsl/third_party/py/python_init_pip.bzl @@ -0,0 +1,34 @@ +"""Hermetic Python initialization. Consult the WORKSPACE on how to use it.""" + +load("@python//:defs.bzl", "interpreter") +load("@python_version_repo//:py_version.bzl", "REQUIREMENTS") +load("@rules_python//python:pip.bzl", "package_annotation", "pip_parse") + +def python_init_pip(): + numpy_annotations = { + "numpy": package_annotation( + additive_build_content = """\ +cc_library( + name = "numpy_headers_2", + hdrs = glob(["site-packages/numpy/_core/include/**/*.h"]), + strip_include_prefix="site-packages/numpy/_core/include/", +) +cc_library( + name = "numpy_headers_1", + hdrs = glob(["site-packages/numpy/core/include/**/*.h"]), + strip_include_prefix="site-packages/numpy/core/include/", +) +cc_library( + name = "numpy_headers", + deps = [":numpy_headers_2", ":numpy_headers_1"], +) +""", + ), + } + + pip_parse( + name = "pypi", + annotations = numpy_annotations, + python_interpreter_target = interpreter, + requirements_lock = REQUIREMENTS, + ) diff --git a/third_party/xla/third_party/tsl/third_party/py/python_init_repositories.bzl b/third_party/xla/third_party/tsl/third_party/py/python_init_repositories.bzl new file mode 100644 index 00000000000000..5a405f2c2aba4c --- /dev/null +++ b/third_party/xla/third_party/tsl/third_party/py/python_init_repositories.bzl @@ -0,0 +1,12 @@ +"""Hermetic Python initialization. Consult the WORKSPACE on how to use it.""" + +load("@rules_python//python:repositories.bzl", "py_repositories") +load("//third_party/py:python_repo.bzl", "python_repository") + +def python_init_repositories(requirements = {}): + python_repository( + name = "python_version_repo", + requirements_versions = requirements.keys(), + requirements_locks = requirements.values(), + ) + py_repositories() diff --git a/third_party/xla/third_party/tsl/third_party/py/python_init_rules.bzl b/third_party/xla/third_party/tsl/third_party/py/python_init_rules.bzl new file mode 100644 index 00000000000000..98a7b8bc3c315a --- /dev/null +++ b/third_party/xla/third_party/tsl/third_party/py/python_init_rules.bzl @@ -0,0 +1,11 @@ +"""Hermetic Python initialization. Consult the WORKSPACE on how to use it.""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def python_init_rules(): + http_archive( + name = "rules_python", + sha256 = "9d04041ac92a0985e344235f5d946f71ac543f1b1565f2cdbc9a2aaee8adf55b", + strip_prefix = "rules_python-0.26.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.26.0/rules_python-0.26.0.tar.gz", + ) diff --git a/third_party/xla/third_party/tsl/third_party/py/python_init_toolchains.bzl b/third_party/xla/third_party/tsl/third_party/py/python_init_toolchains.bzl new file mode 100644 index 00000000000000..c1f800db4c01e7 --- /dev/null +++ b/third_party/xla/third_party/tsl/third_party/py/python_init_toolchains.bzl @@ -0,0 +1,13 @@ +"""Hermetic Python initialization. 
Consult the WORKSPACE on how to use it.""" + +load("@python_version_repo//:py_version.bzl", "HERMETIC_PYTHON_VERSION") +load("@rules_python//python:repositories.bzl", "python_register_toolchains") +load("@rules_python//python:versions.bzl", "MINOR_MAPPING") + +def python_init_toolchains(): + if HERMETIC_PYTHON_VERSION in MINOR_MAPPING: + python_register_toolchains( + name = "python", + ignore_root_user_error = True, + python_version = HERMETIC_PYTHON_VERSION, + ) diff --git a/third_party/xla/third_party/tsl/third_party/py/python_repo.bzl b/third_party/xla/third_party/tsl/third_party/py/python_repo.bzl new file mode 100644 index 00000000000000..77a6ce9ce50b60 --- /dev/null +++ b/third_party/xla/third_party/tsl/third_party/py/python_repo.bzl @@ -0,0 +1,206 @@ +""" +Repository rule to manage hermetic Python interpreter under Bazel. + +Version can be set via build parameter "--repo_env=HERMETIC_PYTHON_VERSION=3.11" +Defaults to 3.11. + +To set wheel name, add "--repo_env=WHEEL_NAME=tensorflow_cpu" +""" + +VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] +DEFAULT_VERSION = "3.11" +WARNING = """ +HERMETIC_PYTHON_VERSION variable was not set correctly, using default version. +Python {} will be used. +To select Python version, either set HERMETIC_PYTHON_VERSION env variable in +your shell: + export HERMETIC_PYTHON_VERSION=3.12 +OR pass it as an argument to bazel command directly or inside your .bazelrc +file: + --repo_env=HERMETIC_PYTHON_VERSION=3.12 +""".format(DEFAULT_VERSION) + +content = """TF_PYTHON_VERSION = "{version}" +HERMETIC_PYTHON_VERSION = "{version}" +WHEEL_NAME = "{wheel_name}" +WHEEL_COLLAB = "{wheel_collab}" +REQUIREMENTS = "{requirements}" +""" + +def _python_repository_impl(ctx): + ctx.file("BUILD", "") + version_legacy = ctx.os.environ.get("TF_PYTHON_VERSION", "") + version = ctx.os.environ.get("HERMETIC_PYTHON_VERSION", "") + if not version: + version = version_legacy + else: + version_legacy = version + + wheel_name = ctx.os.environ.get("WHEEL_NAME", "tensorflow") + wheel_collab = ctx.os.environ.get("WHEEL_COLLAB", False) + if version not in VERSIONS: + print(WARNING) # buildifier: disable=print + version = DEFAULT_VERSION + else: + print("Using hermetic Python %s" % version) # buildifier: disable=print + + requirements = "" + for i in range(0, len(ctx.attr.requirements_locks)): + if ctx.attr.requirements_versions[i] == version: + requirements = ctx.attr.requirements_locks[i] + break + + ctx.file( + "py_version.bzl", + content.format( + version = version, + wheel_name = wheel_name, + wheel_collab = wheel_collab, + requirements = str(requirements), + ), + ) + +python_repository = repository_rule( + implementation = _python_repository_impl, + attrs = { + "requirements_versions": attr.string_list( + mandatory = False, + default = [], + ), + "requirements_locks": attr.label_list( + mandatory = False, + default = [], + ), + }, + environ = [ + "TF_PYTHON_VERSION", + "HERMETIC_PYTHON_VERSION", + "WHEEL_NAME", + "WHEEL_COLLAB", + ], +) + +def _custom_python_interpreter_impl(ctx): + version = ctx.attr.version + strip_prefix = ctx.attr.strip_prefix.format(version = version) + urls = [url.format(version = version) for url in ctx.attr.urls] + binary_name = ctx.attr.binary_name + if not binary_name: + ver_chunks = version.split(".") + binary_name = "python%s.%s" % (ver_chunks[0], ver_chunks[1]) + + install_dir = "{name}-{version}".format(name = ctx.attr.name, version = version) + _exec_and_check(ctx, ["mkdir", install_dir]) + install_path = ctx.path(install_dir) + srcs_dir = "srcs" + 
ctx.download_and_extract( + url = urls, + stripPrefix = strip_prefix, + output = srcs_dir, + ) + + configure_params = [] + if "CC" in ctx.os.environ: + configure_params.append("CC={}".format(ctx.os.environ["CC"])) + if "CXX" in ctx.os.environ: + configure_params.append("CXX={}".format(ctx.os.environ["CXX"])) + + configure_params.append("--enable-optimizations") + configure_params.append("--prefix=%s" % install_path.realpath) + _exec_and_check( + ctx, + ["./configure"] + configure_params, + working_directory = srcs_dir, + quiet = False, + ) + res = _exec_and_check(ctx, ["nproc"]) + cores = 12 if res.return_code != 0 else max(1, int(res.stdout.strip()) - 1) + _exec_and_check(ctx, ["make", "-j%s" % cores], working_directory = srcs_dir) + _exec_and_check(ctx, ["make", "altinstall"], working_directory = srcs_dir) + _exec_and_check(ctx, ["ln", "-s", binary_name, "python3"], working_directory = install_dir + "/bin") + tar = "{install_dir}.tgz".format(install_dir = install_dir) + _exec_and_check(ctx, ["tar", "czpf", tar, install_dir]) + _exec_and_check(ctx, ["rm", "-rf", srcs_dir]) + res = _exec_and_check(ctx, ["sha256sum", tar]) + + sha256 = res.stdout.split(" ")[0].strip() + tar_path = ctx.path(tar) + + example = """\n\n +To use newly built Python interpreter add the following code snippet RIGHT AFTER +python_init_toolchains() in your WORKSPACE file. The code sample should work as +is but it may need some tuning, if you have special requirements. + +``` +load("@rules_python//python:repositories.bzl", "python_register_toolchains") +python_register_toolchains( + name = "python", + # By default assume the interpreter is on the local file system, replace + # with proper URL if it is not the case. + base_url = "file://", + ignore_root_user_error = True, + python_version = "{version}", + tool_versions = {{ + "{version}": {{ + # Path to .tar.gz with Python binary. By default it points to .tgz + # file in cache where it was built originally; replace with proper + # file location, if you moved it somewhere else. + "url": "{tar_path}", + "sha256": {{ + # By default we assume Linux x86_64 architecture, eplace with + # proper architecture if you were building on a different platform. 
+ "x86_64-unknown-linux-gnu": "{sha256}", + }}, + "strip_prefix": "{install_dir}", + }}, + }}, +) +``` +\n\n""".format(version = version, tar_path = tar_path, sha256 = sha256, install_dir = install_dir) + + instructions = "INSTRUCTIONS-{version}.md".format(version = version) + ctx.file(instructions + ".tmpl", example, executable = False) + ctx.file( + "BUILD.bazel", + """ +genrule( + name = "{name}", + srcs = ["{tar}", "{instructions}.tmpl"], + outs = ["{install_dir}.tar.gz", "{instructions}"], + cmd = "cp $(location {tar}) $(location {install_dir}.tar.gz); cp $(location {instructions}.tmpl) $(location {instructions})", + visibility = ["//visibility:public"], +) + """.format( + name = ctx.attr.name, + tar = tar, + install_dir = install_dir, + instructions = instructions, + ), + executable = False, + ) + + print(example) # buildifier: disable=print + +custom_python_interpreter = repository_rule( + implementation = _custom_python_interpreter_impl, + attrs = { + "urls": attr.string_list(), + "strip_prefix": attr.string(), + "binary_name": attr.string(mandatory = False), + "version": attr.string(), + }, +) + +def _exec_and_check(ctx, command, fail_on_error = True, quiet = False, **kwargs): + res = ctx.execute(command, quiet = quiet, **kwargs) + if fail_on_error and res.return_code != 0: + fail(""" +Failed to execute command: `{command}` +Exit Code: {code} +STDERR: {stderr} + """.format( + command = command, + code = res.return_code, + stderr = res.stderr, + )) + return res diff --git a/third_party/xla/third_party/tsl/third_party/tf_runtime/workspace.bzl b/third_party/xla/third_party/tsl/third_party/tf_runtime/workspace.bzl index 8cd9762125eee0..04d0e390c8dfe3 100644 --- a/third_party/xla/third_party/tsl/third_party/tf_runtime/workspace.bzl +++ b/third_party/xla/third_party/tsl/third_party/tf_runtime/workspace.bzl @@ -6,8 +6,8 @@ def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. 
- TFRT_COMMIT = "7bdf48f1aac0b48ff85a4e0fb5ff7f98a703f8d6" - TFRT_SHA256 = "dc02eeae8f6c3e72bf51cad0f04676eeefabe241389b6f08b8310c999eeb64b3" + TFRT_COMMIT = "edb0d2c6f5e343c83ea121817dc2599ad5453d5c" + TFRT_SHA256 = "97f7bfcbff025da3005e59b9ffe1bcb06b439874e3e2cd28a17d9287193d6901" tf_http_archive( name = "tf_runtime", diff --git a/third_party/xla/third_party/tsl/tools/def_file_filter/symbols_pybind.txt b/third_party/xla/third_party/tsl/tools/def_file_filter/symbols_pybind.txt index 78c42c6f454c5c..69aea896b57102 100644 --- a/third_party/xla/third_party/tsl/tools/def_file_filter/symbols_pybind.txt +++ b/third_party/xla/third_party/tsl/tools/def_file_filter/symbols_pybind.txt @@ -566,6 +566,10 @@ tensorflow::quantization::QuantizeStaticRangePtq tensorflow::quantization::QuantizeDynamicRangePtq tensorflow::quantization::QuantizeWeightOnly +[//tensorflow/compiler/mlir/quantization/tensorflow_to_stablehlo/python:pywrap_tensorflow_to_stablehlo_lib_impl] # tensorflow_to_stablehlo +mlir::tensorflow_to_stablehlo::pywrap::PywrapSavedModelToStablehlo +mlir::tensorflow_to_stablehlo::pywrap::PywrapTfModuleToStablehlo + [//tensorflow/dtensor/cc:dtensor_device_cc] # DTensor tensorflow::dtensor::AllocateDTensorDevice tensorflow::dtensor::AddMesh @@ -590,3 +594,9 @@ tensorflow::dtensor::SetIteratorElementLayouts tensorflow::dtensor::Mesh tensorflow::dtensor::Layout tensorflow::dtensor::Layout::LayoutType + +[//tensorflow/core/tpu/kernels:sparse_core_layout] #SparseCoreLayoutStacker +tensorflow::tpu::SparseCoreLayoutStacker::AddTable +tensorflow::tpu::SparseCoreLayoutStacker::SparseCoreLayoutStacker +tensorflow::tpu::SparseCoreLayoutStacker::AddTable +tensorflow::tpu::SparseCoreLayoutStacker::GetLayouts \ No newline at end of file diff --git a/third_party/xla/third_party/tsl/tsl/framework/bfc_allocator.h b/third_party/xla/third_party/tsl/tsl/framework/bfc_allocator.h index 76921c5f04a79d..6ea913059c472a 100644 --- a/third_party/xla/third_party/tsl/tsl/framework/bfc_allocator.h +++ b/third_party/xla/third_party/tsl/tsl/framework/bfc_allocator.h @@ -27,6 +27,7 @@ limitations under the License. #include "tsl/framework/allocator.h" #include "tsl/framework/allocator_retry.h" #include "tsl/framework/shared_counter.h" +#include "tsl/lib/core/bits.h" #include "tsl/platform/macros.h" #include "tsl/platform/mutex.h" #include "tsl/platform/numbers.h" @@ -536,28 +537,6 @@ class BFCAllocator : public Allocator { // Structures immutable after construction size_t memory_limit_ = 0; - inline int Log2FloorNonZeroSlow(uint64 n) { - int r = 0; - while (n > 0) { - r++; - n >>= 1; - } - return r - 1; - } - - // Returns floor(log2(n)). 
- inline int Log2FloorNonZero(uint64 n) { -#if defined(__GNUC__) - return 63 ^ __builtin_clzll(n); -#elif defined(PLATFORM_WINDOWS) && (_WIN64) - unsigned long index; - _BitScanReverse64(&index, n); - return index; -#else - return Log2FloorNonZeroSlow(n); -#endif - } - // Map from bin size to Bin Bin* BinFromIndex(BinNum index) { return reinterpret_cast(&(bins_space_[index * sizeof(Bin)])); @@ -567,7 +546,7 @@ class BFCAllocator : public Allocator { } BinNum BinNumForSize(size_t bytes) { uint64 v = std::max(bytes, 256) >> kMinAllocationBits; - int b = std::min(kNumBins - 1, Log2FloorNonZero(v)); + int b = std::min(kNumBins - 1, tsl::Log2Floor64(v)); return b; } Bin* BinForSize(size_t bytes) { return BinFromIndex(BinNumForSize(bytes)); } diff --git a/third_party/xla/third_party/tsl/tsl/framework/mlir/status_scoped_diagnostic_handler.cc b/third_party/xla/third_party/tsl/tsl/framework/mlir/status_scoped_diagnostic_handler.cc index 8882ff8e66cf1d..b2e447a5e778a0 100644 --- a/third_party/xla/third_party/tsl/tsl/framework/mlir/status_scoped_diagnostic_handler.cc +++ b/third_party/xla/third_party/tsl/tsl/framework/mlir/status_scoped_diagnostic_handler.cc @@ -59,6 +59,7 @@ mlir::LogicalResult StatusScopedDiagnosticHandler::handleDiagnostic( // Emit non-errors to VLOG instead of the internal status. if (diag.getSeverity() != mlir::DiagnosticSeverity::Error) { VLOG(1) << diag_str_; + return mlir::success(); } status_.Update(absl::UnknownError(diag_str_)); diff --git a/third_party/xla/third_party/tsl/tsl/lib/core/BUILD b/third_party/xla/third_party/tsl/tsl/lib/core/BUILD index c8bf89d1aa9bd4..0fee1ed113152a 100644 --- a/third_party/xla/third_party/tsl/tsl/lib/core/BUILD +++ b/third_party/xla/third_party/tsl/tsl/lib/core/BUILD @@ -83,6 +83,7 @@ cc_library( compatible_with = get_compatible_with_portable(), deps = [ "//tsl/platform:logging", + "@com_google_absl//absl/numeric:bits", ], alwayslink = 1, ) @@ -102,7 +103,6 @@ cc_library( hdrs = ["bits.h"], deps = [ "//tsl/platform:logging", - "//tsl/platform:types", "@com_google_absl//absl/numeric:bits", ], ) diff --git a/third_party/xla/third_party/tsl/tsl/lib/core/bitmap.cc b/third_party/xla/third_party/tsl/tsl/lib/core/bitmap.cc index f8648d95e3ec5e..8681428cbd879d 100644 --- a/third_party/xla/third_party/tsl/tsl/lib/core/bitmap.cc +++ b/third_party/xla/third_party/tsl/tsl/lib/core/bitmap.cc @@ -15,8 +15,13 @@ limitations under the License. #include "tsl/lib/core/bitmap.h" +#include +#include +#include #include +#include "absl/numeric/bits.h" + namespace tsl { namespace core { @@ -34,41 +39,7 @@ void Bitmap::Reset(size_t n) { // Return 1+index of the first set bit in w; return 0 if w == 0. static size_t FindFirstSet(uint32_t w) { - // TODO(jeff,sanjay): If this becomes a performance issue, we could - // use the __builtin_ffs(w) routine on GCC, or the ffs(w) routine on - // some other platforms. 
- - // clang-format off - static uint8_t kLowestBitSet[256] = { - /* 0*/ 0, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /* 16*/ 5, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /* 32*/ 6, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /* 48*/ 5, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /* 64*/ 7, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /* 80*/ 5, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /* 96*/ 6, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /*112*/ 5, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /*128*/ 8, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /*144*/ 5, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /*160*/ 6, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /*176*/ 5, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /*192*/ 7, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /*208*/ 5, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /*224*/ 6, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - /*240*/ 5, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1, - }; - // clang-format on - if (w & 0xff) { - return kLowestBitSet[w & 0xff]; - } else if ((w >> 8) & 0xff) { - return kLowestBitSet[(w >> 8) & 0xff] + 8; - } else if ((w >> 16) & 0xff) { - return kLowestBitSet[(w >> 16) & 0xff] + 16; - } else if ((w >> 24) & 0xff) { - return kLowestBitSet[(w >> 24) & 0xff] + 24; - } else { - return 0; - } + return w == 0 ? 0 : absl::countr_zero(w) + 1; } size_t Bitmap::FirstUnset(size_t start) const { diff --git a/third_party/xla/third_party/tsl/tsl/lib/core/bits.h b/third_party/xla/third_party/tsl/tsl/lib/core/bits.h index 9a31ae5815febe..f179117201abad 100644 --- a/third_party/xla/third_party/tsl/tsl/lib/core/bits.h +++ b/third_party/xla/third_party/tsl/tsl/lib/core/bits.h @@ -20,91 +20,29 @@ limitations under the License. #include "absl/numeric/bits.h" #include "tsl/platform/logging.h" -#include "tsl/platform/types.h" namespace tsl { // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0. -int Log2Floor(uint32 n); -int Log2Floor64(uint64 n); - -// Return ceiling(log2(n)) for positive integer n. Returns -1 iff n == 0. -int Log2Ceiling(uint32 n); -int Log2Ceiling64(uint64 n); - -// ------------------------------------------------------------------------ -// Implementation details follow -// ------------------------------------------------------------------------ - -#if defined(__GNUC__) - -// Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0. -inline int Log2Floor(uint32 n) { return n == 0 ? -1 : 31 ^ __builtin_clz(n); } - -// Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0. -inline int Log2Floor64(uint64 n) { - return n == 0 ? -1 : 63 ^ __builtin_clzll(n); -} - -#else +inline int Log2Floor(uint32_t n) { return absl::bit_width(n) - 1; } // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0. -inline int Log2Floor(uint32 n) { - if (n == 0) return -1; - int log = 0; - uint32 value = n; - for (int i = 4; i >= 0; --i) { - int shift = (1 << i); - uint32 x = value >> shift; - if (x != 0) { - value = x; - log += shift; - } - } - assert(value == 1); - return log; -} - -// Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0. 
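Side note on the two bit-manipulation hunks above (an editorial illustration, not part of the patch): `absl::countr_zero(w) + 1` reproduces the 1-based find-first-set result of the removed lookup table, and `absl::bit_width(n) - 1` equals floor(log2(n)) for n > 0 and -1 for n == 0, which is what the removed `__builtin_clz`-based and shift-loop implementations computed; the `BFCAllocator` hunk leans on the same identity via `tsl::Log2Floor64`. A self-contained sketch that checks these identities against naive reference versions, assuming only `absl/numeric/bits.h`:

```
// Illustrative only: verify that the absl/numeric/bits.h calls used above
// agree with naive reference implementations of the removed helpers.
#include <cstdint>
#include <cstdio>

#include "absl/numeric/bits.h"

// Naive 1-based find-first-set: 0 if w == 0, otherwise index of lowest set bit + 1.
static int NaiveFindFirstSet(uint32_t w) {
  for (int i = 0; i < 32; ++i) {
    if (w & (1u << i)) return i + 1;
  }
  return 0;
}

// Naive floor(log2(n)); -1 if n == 0.
static int NaiveLog2Floor(uint32_t n) {
  int r = -1;
  while (n != 0) {
    ++r;
    n >>= 1;
  }
  return r;
}

int main() {
  for (uint32_t w : {0u, 1u, 2u, 5u, 255u, 256u, 0x80000000u}) {
    int fast_ffs = (w == 0) ? 0 : absl::countr_zero(w) + 1;    // new FindFirstSet
    int fast_log = static_cast<int>(absl::bit_width(w)) - 1;   // new Log2Floor
    std::printf("w=%u ffs %d/%d log2floor %d/%d\n", static_cast<unsigned>(w),
                fast_ffs, NaiveFindFirstSet(w), fast_log, NaiveLog2Floor(w));
  }
  return 0;
}
```

The `Log2Ceiling` and `NextPowerOfTwo` rewrites that follow rely on the related identity `ceil(log2(n)) == bit_width(n - 1)` for n > 0 and on `absl::bit_ceil`.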
-// Log2Floor64() is defined in terms of Log2Floor32() -inline int Log2Floor64(uint64 n) { - const uint32 topbits = static_cast(n >> 32); - if (topbits == 0) { - // Top bits are zero, so scan in bottom bits - return Log2Floor(static_cast(n)); - } else { - return 32 + Log2Floor(topbits); - } -} +inline int Log2Floor64(uint64_t n) { return absl::bit_width(n) - 1; } -#endif - -inline int Log2Ceiling(uint32 n) { - int floor = Log2Floor(n); - if (n == (n & ~(n - 1))) // zero or a power of two - return floor; - else - return floor + 1; +// Return ceiling(log2(n)) for positive integer n. Returns -1 iff n == 0. +inline int Log2Ceiling(uint32_t n) { + return n == 0 ? -1 : absl::bit_width(n - 1); } -inline int Log2Ceiling64(uint64 n) { - int floor = Log2Floor64(n); - if (n == (n & ~(n - 1))) // zero or a power of two - return floor; - else - return floor + 1; +// Return ceiling(log2(n)) for positive integer n. Returns -1 iff n == 0. +inline int Log2Ceiling64(uint64_t n) { + return n == 0 ? -1 : absl::bit_width(n - 1); } -inline uint32 NextPowerOfTwo(uint32 value) { - int exponent = Log2Ceiling(value); - DCHECK_LT(exponent, std::numeric_limits::digits); - return 1 << exponent; -} +inline uint32_t NextPowerOfTwo(uint32_t value) { return absl::bit_ceil(value); } -inline uint64 NextPowerOfTwo64(uint64 value) { - int exponent = Log2Ceiling(value); - DCHECK_LT(exponent, std::numeric_limits::digits); - return 1LL << exponent; +inline uint64_t NextPowerOfTwo64(uint64_t value) { + return absl::bit_ceil(value); } inline int64_t NextPowerOfTwoS64(int64_t value) { diff --git a/third_party/xla/third_party/tsl/tsl/lib/io/BUILD b/third_party/xla/third_party/tsl/tsl/lib/io/BUILD index e92e2896dc9241..c103dcfdc5a417 100644 --- a/third_party/xla/third_party/tsl/tsl/lib/io/BUILD +++ b/third_party/xla/third_party/tsl/tsl/lib/io/BUILD @@ -18,6 +18,7 @@ package( "//tensorflow/core:__pkg__", "//tensorflow/core/lib/io:__subpackages__", "//tsl/profiler:__subpackages__", + "//tensorflow/core/profiler:__subpackages__", ]), licenses = ["notice"], ) diff --git a/third_party/xla/third_party/tsl/tsl/platform/BUILD b/third_party/xla/third_party/tsl/tsl/platform/BUILD index 3b1088a76ee64b..6d96af9ac0f6f7 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/BUILD +++ b/third_party/xla/third_party/tsl/tsl/platform/BUILD @@ -1455,6 +1455,7 @@ tsl_cc_test( "//tsl/platform/testdata:test_noop", "//tsl/platform/testdata:test_stderr", ], + tags = ["no_oss"], # TODO(b/327036247): revisit after this moves to XLA deps = [ ":path", ":strcat", diff --git a/third_party/xla/third_party/tsl/tsl/platform/cloud/BUILD b/third_party/xla/third_party/tsl/tsl/platform/cloud/BUILD index 2ce4463f65b7ee..bff2db41d43626 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/cloud/BUILD +++ b/third_party/xla/third_party/tsl/tsl/platform/cloud/BUILD @@ -455,6 +455,7 @@ tsl_cc_test( "//tsl/platform/cloud/testdata:service_account_credentials", "//tsl/platform/cloud/testdata:service_account_public_key", ], + tags = ["no_oss"], # TODO(b/327036247): revisit after this moves to XLA deps = [ ":http_request_fake", ":oauth_client", @@ -478,6 +479,7 @@ tsl_cc_test( "//tsl/platform/cloud/testdata:application_default_credentials", "//tsl/platform/cloud/testdata:service_account_credentials", ], + tags = ["no_oss"], # TODO(b/327036247): revisit after this moves to XLA deps = [ ":google_auth_provider", ":http_request_fake", diff --git a/third_party/xla/third_party/tsl/tsl/platform/default/build_config_root.bzl 
b/third_party/xla/third_party/tsl/tsl/platform/default/build_config_root.bzl index a900565143027d..142641b16d2fa3 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/default/build_config_root.bzl +++ b/third_party/xla/third_party/tsl/tsl/platform/default/build_config_root.bzl @@ -12,7 +12,7 @@ GPU_TEST_PROPERTIES = { } def tf_gpu_tests_tags(): - return ["gpu"] + gpu_test_tags() + return ["requires-gpu-nvidia", "gpu"] + gpu_test_tags() # terminology changes: saving tf_cuda_* for compatibility def tf_cuda_tests_tags(): diff --git a/third_party/xla/third_party/tsl/tsl/platform/default/dso_loader.cc b/third_party/xla/third_party/tsl/tsl/platform/default/dso_loader.cc index a835a81489367a..39c09808530185 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/default/dso_loader.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/default/dso_loader.cc @@ -181,7 +181,13 @@ absl::StatusOr GetHipblasltDsoHandle() { return GetDsoHandle("hipblaslt", ""); } -absl::StatusOr GetHipDsoHandle() { return GetDsoHandle("amdhip64", ""); } +#if (TF_ROCM_VERSION >= 60000) +#define HIP_SO_VERSION "6" +#else +#define HIP_SO_VERSION "5" +#endif + +absl::StatusOr GetHipDsoHandle() { return GetDsoHandle("amdhip64", HIP_SO_VERSION); } } // namespace DsoLoader diff --git a/third_party/xla/third_party/tsl/tsl/platform/env.cc b/third_party/xla/third_party/tsl/tsl/platform/env.cc index 2649aa00a5d33c..45199b50ebd94b 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/env.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/env.cc @@ -55,11 +55,11 @@ constexpr size_t kCopyFileBufferSize = 128 * 1024; class FileSystemRegistryImpl : public FileSystemRegistry { public: - Status Register(const std::string& scheme, Factory factory) override; - Status Register(const std::string& scheme, - std::unique_ptr filesystem) override; + absl::Status Register(const std::string& scheme, Factory factory) override; + absl::Status Register(const std::string& scheme, + std::unique_ptr filesystem) override; FileSystem* Lookup(const std::string& scheme) override; - Status GetRegisteredFileSystemSchemes( + absl::Status GetRegisteredFileSystemSchemes( std::vector* schemes) override; private: @@ -68,25 +68,25 @@ class FileSystemRegistryImpl : public FileSystemRegistry { TF_GUARDED_BY(mu_); }; -Status FileSystemRegistryImpl::Register(const std::string& scheme, - FileSystemRegistry::Factory factory) { +absl::Status FileSystemRegistryImpl::Register( + const std::string& scheme, FileSystemRegistry::Factory factory) { mutex_lock lock(mu_); if (!registry_.emplace(scheme, std::unique_ptr(factory())) .second) { return errors::AlreadyExists("File factory for ", scheme, " already registered"); } - return OkStatus(); + return absl::OkStatus(); } -Status FileSystemRegistryImpl::Register( +absl::Status FileSystemRegistryImpl::Register( const std::string& scheme, std::unique_ptr filesystem) { mutex_lock lock(mu_); if (!registry_.emplace(scheme, std::move(filesystem)).second) { return errors::AlreadyExists("File system for ", scheme, " already registered"); } - return OkStatus(); + return absl::OkStatus(); } FileSystem* FileSystemRegistryImpl::Lookup(const std::string& scheme) { @@ -98,19 +98,19 @@ FileSystem* FileSystemRegistryImpl::Lookup(const std::string& scheme) { return found->second.get(); } -Status FileSystemRegistryImpl::GetRegisteredFileSystemSchemes( +absl::Status FileSystemRegistryImpl::GetRegisteredFileSystemSchemes( std::vector* schemes) { mutex_lock lock(mu_); for (const auto& e : registry_) { schemes->push_back(e.first); } - 
return OkStatus(); + return absl::OkStatus(); } Env::Env() : file_system_registry_(new FileSystemRegistryImpl) {} -Status Env::GetFileSystemForFile(const std::string& fname, - FileSystem** result) { +absl::Status Env::GetFileSystemForFile(const std::string& fname, + FileSystem** result) { StringPiece scheme, host, path; io::ParseURI(fname, &scheme, &host, &path); FileSystem* file_system = file_system_registry_->Lookup(std::string(scheme)); @@ -123,25 +123,26 @@ Status Env::GetFileSystemForFile(const std::string& fname, "' not implemented (file: '", fname, "')"); } *result = file_system; - return OkStatus(); + return absl::OkStatus(); } -Status Env::GetRegisteredFileSystemSchemes(std::vector* schemes) { +absl::Status Env::GetRegisteredFileSystemSchemes( + std::vector* schemes) { return file_system_registry_->GetRegisteredFileSystemSchemes(schemes); } -Status Env::RegisterFileSystem(const std::string& scheme, - FileSystemRegistry::Factory factory) { +absl::Status Env::RegisterFileSystem(const std::string& scheme, + FileSystemRegistry::Factory factory) { return file_system_registry_->Register(scheme, std::move(factory)); } -Status Env::RegisterFileSystem(const std::string& scheme, - std::unique_ptr filesystem) { +absl::Status Env::RegisterFileSystem(const std::string& scheme, + std::unique_ptr filesystem) { return file_system_registry_->Register(scheme, std::move(filesystem)); } -Status Env::SetOption(const std::string& scheme, const std::string& key, - const std::string& value) { +absl::Status Env::SetOption(const std::string& scheme, const std::string& key, + const std::string& value) { FileSystem* file_system = file_system_registry_->Lookup(scheme); if (!file_system) { return errors::Unimplemented("File system scheme '", scheme, @@ -150,8 +151,8 @@ Status Env::SetOption(const std::string& scheme, const std::string& key, return file_system->SetOption(key, value); } -Status Env::SetOption(const std::string& scheme, const std::string& key, - const std::vector& values) { +absl::Status Env::SetOption(const std::string& scheme, const std::string& key, + const std::vector& values) { FileSystem* file_system = file_system_registry_->Lookup(scheme); if (!file_system) { return errors::Unimplemented("File system scheme '", scheme, @@ -160,8 +161,8 @@ Status Env::SetOption(const std::string& scheme, const std::string& key, return file_system->SetOption(key, values); } -Status Env::SetOption(const std::string& scheme, const std::string& key, - const std::vector& values) { +absl::Status Env::SetOption(const std::string& scheme, const std::string& key, + const std::vector& values) { FileSystem* file_system = file_system_registry_->Lookup(scheme); if (!file_system) { return errors::Unimplemented("File system scheme '", scheme, @@ -170,8 +171,8 @@ Status Env::SetOption(const std::string& scheme, const std::string& key, return file_system->SetOption(key, values); } -Status Env::SetOption(const std::string& scheme, const std::string& key, - const std::vector& values) { +absl::Status Env::SetOption(const std::string& scheme, const std::string& key, + const std::vector& values) { FileSystem* file_system = file_system_registry_->Lookup(scheme); if (!file_system) { return errors::Unimplemented("File system scheme '", scheme, @@ -180,7 +181,7 @@ Status Env::SetOption(const std::string& scheme, const std::string& key, return file_system->SetOption(key, values); } -Status Env::FlushFileSystemCaches() { +absl::Status Env::FlushFileSystemCaches() { std::vector schemes; 
TF_RETURN_IF_ERROR(GetRegisteredFileSystemSchemes(&schemes)); for (const string& scheme : schemes) { @@ -189,45 +190,45 @@ Status Env::FlushFileSystemCaches() { GetFileSystemForFile(io::CreateURI(scheme, "", ""), &fs)); fs->FlushCaches(); } - return OkStatus(); + return absl::OkStatus(); } -Status Env::NewRandomAccessFile(const string& fname, - std::unique_ptr* result) { +absl::Status Env::NewRandomAccessFile( + const string& fname, std::unique_ptr* result) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(fname, &fs)); return fs->NewRandomAccessFile(fname, result); } -Status Env::NewReadOnlyMemoryRegionFromFile( +absl::Status Env::NewReadOnlyMemoryRegionFromFile( const string& fname, std::unique_ptr* result) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(fname, &fs)); return fs->NewReadOnlyMemoryRegionFromFile(fname, result); } -Status Env::NewWritableFile(const string& fname, - std::unique_ptr* result) { +absl::Status Env::NewWritableFile(const string& fname, + std::unique_ptr* result) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(fname, &fs)); return fs->NewWritableFile(fname, result); } -Status Env::NewAppendableFile(const string& fname, - std::unique_ptr* result) { +absl::Status Env::NewAppendableFile(const string& fname, + std::unique_ptr* result) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(fname, &fs)); return fs->NewAppendableFile(fname, result); } -Status Env::FileExists(const string& fname) { +absl::Status Env::FileExists(const string& fname) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(fname, &fs)); return fs->FileExists(fname); } bool Env::FilesExist(const std::vector& files, - std::vector* status) { + std::vector* status) { std::unordered_map> files_per_fs; for (const auto& file : files) { StringPiece scheme, host, path; @@ -235,18 +236,18 @@ bool Env::FilesExist(const std::vector& files, files_per_fs[string(scheme)].push_back(file); } - std::unordered_map per_file_status; + std::unordered_map per_file_status; bool result = true; for (auto itr : files_per_fs) { FileSystem* file_system = file_system_registry_->Lookup(itr.first); bool fs_result; - std::vector local_status; - std::vector* fs_status = status ? &local_status : nullptr; + std::vector local_status; + std::vector* fs_status = status ? 
&local_status : nullptr; if (!file_system) { fs_result = false; if (fs_status) { - Status s = errors::Unimplemented("File system scheme '", itr.first, - "' not implemented"); + absl::Status s = errors::Unimplemented("File system scheme '", + itr.first, "' not implemented"); local_status.resize(itr.second.size(), s); } } else { @@ -272,75 +273,76 @@ bool Env::FilesExist(const std::vector& files, return result; } -Status Env::GetChildren(const string& dir, std::vector* result) { +absl::Status Env::GetChildren(const string& dir, std::vector* result) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(dir, &fs)); return fs->GetChildren(dir, result); } -Status Env::GetMatchingPaths(const string& pattern, - std::vector* results) { +absl::Status Env::GetMatchingPaths(const string& pattern, + std::vector* results) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(pattern, &fs)); return fs->GetMatchingPaths(pattern, results); } -Status Env::DeleteFile(const string& fname) { +absl::Status Env::DeleteFile(const string& fname) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(fname, &fs)); return fs->DeleteFile(fname); } -Status Env::RecursivelyCreateDir(const string& dirname) { +absl::Status Env::RecursivelyCreateDir(const string& dirname) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(dirname, &fs)); return fs->RecursivelyCreateDir(dirname); } -Status Env::CreateDir(const string& dirname) { +absl::Status Env::CreateDir(const string& dirname) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(dirname, &fs)); return fs->CreateDir(dirname); } -Status Env::DeleteDir(const string& dirname) { +absl::Status Env::DeleteDir(const string& dirname) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(dirname, &fs)); return fs->DeleteDir(dirname); } -Status Env::Stat(const string& fname, FileStatistics* stat) { +absl::Status Env::Stat(const string& fname, FileStatistics* stat) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(fname, &fs)); return fs->Stat(fname, stat); } -Status Env::IsDirectory(const string& fname) { +absl::Status Env::IsDirectory(const string& fname) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(fname, &fs)); return fs->IsDirectory(fname); } -Status Env::HasAtomicMove(const string& path, bool* has_atomic_move) { +absl::Status Env::HasAtomicMove(const string& path, bool* has_atomic_move) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(path, &fs)); return fs->HasAtomicMove(path, has_atomic_move); } -Status Env::DeleteRecursively(const string& dirname, int64_t* undeleted_files, - int64_t* undeleted_dirs) { +absl::Status Env::DeleteRecursively(const string& dirname, + int64_t* undeleted_files, + int64_t* undeleted_dirs) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(dirname, &fs)); return fs->DeleteRecursively(dirname, undeleted_files, undeleted_dirs); } -Status Env::GetFileSize(const string& fname, uint64* file_size) { +absl::Status Env::GetFileSize(const string& fname, uint64* file_size) { FileSystem* fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(fname, &fs)); return fs->GetFileSize(fname, file_size); } -Status Env::RenameFile(const string& src, const string& target) { +absl::Status Env::RenameFile(const string& src, const string& target) { FileSystem* src_fs; FileSystem* target_fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(src, &src_fs)); @@ -352,7 +354,7 @@ Status Env::RenameFile(const string& src, const string& target) { return src_fs->RenameFile(src, target); } -Status Env::CopyFile(const string& src, const 
string& target) { +absl::Status Env::CopyFile(const string& src, const string& target) { FileSystem* src_fs; FileSystem* target_fs; TF_RETURN_IF_ERROR(GetFileSystemForFile(src, &src_fs)); @@ -464,9 +466,9 @@ Thread::~Thread() {} EnvWrapper::~EnvWrapper() {} -Status ReadFileToString(Env* env, const string& fname, string* data) { +absl::Status ReadFileToString(Env* env, const string& fname, string* data) { uint64 file_size; - Status s = env->GetFileSize(fname, &file_size); + absl::Status s = env->GetFileSize(fname, &file_size); if (!s.ok()) { return s; } @@ -493,10 +495,10 @@ Status ReadFileToString(Env* env, const string& fname, string* data) { return s; } -Status WriteStringToFile(Env* env, const string& fname, - const StringPiece& data) { +absl::Status WriteStringToFile(Env* env, const string& fname, + const StringPiece& data) { std::unique_ptr file; - Status s = env->NewWritableFile(fname, &file); + absl::Status s = env->NewWritableFile(fname, &file); if (!s.ok()) { return s; } @@ -507,8 +509,8 @@ Status WriteStringToFile(Env* env, const string& fname, return s; } -Status FileSystemCopyFile(FileSystem* src_fs, const string& src, - FileSystem* target_fs, const string& target) { +absl::Status FileSystemCopyFile(FileSystem* src_fs, const string& src, + FileSystem* target_fs, const string& target) { std::unique_ptr src_file; TF_RETURN_IF_ERROR(src_fs->NewRandomAccessFile(src, &src_file)); @@ -525,7 +527,7 @@ Status FileSystemCopyFile(FileSystem* src_fs, const string& src, uint64 offset = 0; std::unique_ptr scratch(new char[kCopyFileBufferSize]); - Status s = OkStatus(); + absl::Status s = absl::OkStatus(); while (s.ok()) { StringPiece result; s = src_file->Read(offset, kCopyFileBufferSize, &result, scratch.get()); @@ -550,11 +552,11 @@ class FileStream : public protobuf::io::ZeroCopyInputStream { return true; } int64_t ByteCount() const override { return pos_; } - Status status() const { return status_; } + absl::Status status() const { return status_; } bool Next(const void** data, int* size) override { StringPiece result; - Status s = file_->Read(pos_, kBufSize, &result, scratch_); + absl::Status s = file_->Read(pos_, kBufSize, &result, scratch_); if (result.empty()) { status_ = s; return false; @@ -570,21 +572,21 @@ class FileStream : public protobuf::io::ZeroCopyInputStream { RandomAccessFile* file_; int64_t pos_; - Status status_; + absl::Status status_; char scratch_[kBufSize]; }; } // namespace -Status WriteBinaryProto(Env* env, const string& fname, - const protobuf::MessageLite& proto) { +absl::Status WriteBinaryProto(Env* env, const string& fname, + const protobuf::MessageLite& proto) { string serialized; proto.AppendToString(&serialized); return WriteStringToFile(env, fname, serialized); } -Status ReadBinaryProto(Env* env, const string& fname, - protobuf::MessageLite* proto) { +absl::Status ReadBinaryProto(Env* env, const string& fname, + protobuf::MessageLite* proto) { std::unique_ptr file; TF_RETURN_IF_ERROR(env->NewRandomAccessFile(fname, &file)); std::unique_ptr stream(new FileStream(file.get())); @@ -595,11 +597,11 @@ Status ReadBinaryProto(Env* env, const string& fname, TF_RETURN_IF_ERROR(stream->status()); return errors::DataLoss("Can't parse ", fname, " as binary proto"); } - return OkStatus(); + return absl::OkStatus(); } -Status WriteTextProto(Env* env, const string& fname, - const protobuf::Message& proto) { +absl::Status WriteTextProto(Env* env, const string& fname, + const protobuf::Message& proto) { string serialized; if (!protobuf::TextFormat::PrintToString(proto, 
&serialized)) { return errors::FailedPrecondition("Unable to convert proto to text."); @@ -607,7 +609,8 @@ Status WriteTextProto(Env* env, const string& fname, return WriteStringToFile(env, fname, serialized); } -Status ReadTextProto(Env* env, const string& fname, protobuf::Message* proto) { +absl::Status ReadTextProto(Env* env, const string& fname, + protobuf::Message* proto) { std::unique_ptr file; TF_RETURN_IF_ERROR(env->NewRandomAccessFile(fname, &file)); std::unique_ptr stream(new FileStream(file.get())); @@ -616,19 +619,19 @@ Status ReadTextProto(Env* env, const string& fname, protobuf::Message* proto) { TF_RETURN_IF_ERROR(stream->status()); return errors::DataLoss("Can't parse ", fname, " as text proto"); } - return OkStatus(); + return absl::OkStatus(); } -Status ReadTextOrBinaryProto(Env* env, const string& fname, - protobuf::Message* proto) { +absl::Status ReadTextOrBinaryProto(Env* env, const string& fname, + protobuf::Message* proto) { if (ReadTextProto(env, fname, proto).ok()) { - return OkStatus(); + return absl::OkStatus(); } return ReadBinaryProto(env, fname, proto); } -Status ReadTextOrBinaryProto(Env* env, const string& fname, - protobuf::MessageLite* proto) { +absl::Status ReadTextOrBinaryProto(Env* env, const string& fname, + protobuf::MessageLite* proto) { return ReadBinaryProto(env, fname, proto); } diff --git a/third_party/xla/third_party/tsl/tsl/platform/env.h b/third_party/xla/third_party/tsl/tsl/platform/env.h index 35b446a99445a5..6953ac8722976a 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/env.h +++ b/third_party/xla/third_party/tsl/tsl/platform/env.h @@ -79,16 +79,16 @@ class Env { /// for the file system related (non-virtual) functions that follow. /// Returned FileSystem object is still owned by the Env object and will // (might) be destroyed when the environment is destroyed. - virtual Status GetFileSystemForFile(const std::string& fname, - FileSystem** result); + virtual absl::Status GetFileSystemForFile(const std::string& fname, + FileSystem** result); /// \brief Returns the file system schemes registered for this Env. - virtual Status GetRegisteredFileSystemSchemes( + virtual absl::Status GetRegisteredFileSystemSchemes( std::vector* schemes); /// \brief Register a file system for a scheme. - virtual Status RegisterFileSystem(const std::string& scheme, - FileSystemRegistry::Factory factory); + virtual absl::Status RegisterFileSystem(const std::string& scheme, + FileSystemRegistry::Factory factory); /// \brief Register a modular file system for a scheme. /// @@ -96,23 +96,23 @@ class Env { /// /// TODO(b/139060984): After all filesystems are converted, make this be the /// canonical registration function. 
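The remaining hunks in `env.cc`/`env.h` (and in `errors.cc`/`errors.h` further down) are a mechanical re-spelling: in this codebase `tsl::Status` is an alias of `absl::Status`, so only the declared type changes while call sites, `TF_RETURN_IF_ERROR`, and `.ok()` checks remain as they are. A minimal sketch of the calling convention these signatures rely on; `LookupScheme` is a hypothetical stand-in, not a TSL API:

```
// Illustrative only: the Status handling pattern the new signatures expect.
#include <iostream>
#include <string>

#include "absl/status/status.h"

// Hypothetical lookup helper, standing in for an Env/FileSystem method.
absl::Status LookupScheme(const std::string& scheme, std::string* out) {
  if (scheme != "file") {
    return absl::UnimplementedError("File system scheme '" + scheme +
                                    "' not implemented");
  }
  *out = "PosixFileSystem";
  return absl::OkStatus();  // same spelling the patch substitutes for OkStatus()
}

int main() {
  std::string fs;
  absl::Status s = LookupScheme("gs", &fs);
  if (!s.ok()) {
    std::cout << s.ToString() << "\n";  // UNIMPLEMENTED: File system scheme ...
  }
  return s.ok() ? 0 : 1;
}
```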
- virtual Status RegisterFileSystem(const std::string& scheme, - std::unique_ptr filesystem); + virtual absl::Status RegisterFileSystem( + const std::string& scheme, std::unique_ptr filesystem); - Status SetOption(const std::string& scheme, const std::string& key, - const std::string& value); + absl::Status SetOption(const std::string& scheme, const std::string& key, + const std::string& value); - Status SetOption(const std::string& scheme, const std::string& key, - const std::vector& values); + absl::Status SetOption(const std::string& scheme, const std::string& key, + const std::vector& values); - Status SetOption(const std::string& scheme, const std::string& key, - const std::vector& values); + absl::Status SetOption(const std::string& scheme, const std::string& key, + const std::vector& values); - Status SetOption(const std::string& scheme, const std::string& key, - const std::vector& values); + absl::Status SetOption(const std::string& scheme, const std::string& key, + const std::vector& values); /// \brief Flush filesystem caches for all registered filesystems. - Status FlushFileSystemCaches(); + absl::Status FlushFileSystemCaches(); /// \brief Creates a brand new random access read-only file with the /// specified name. @@ -127,14 +127,15 @@ class Env { /// The ownership of the returned RandomAccessFile is passed to the caller /// and the object should be deleted when is not used. The file object /// shouldn't live longer than the Env object. - Status NewRandomAccessFile(const std::string& fname, - std::unique_ptr* result); + absl::Status NewRandomAccessFile(const std::string& fname, + std::unique_ptr* result); - Status NewRandomAccessFile(const std::string& fname, TransactionToken* token, - std::unique_ptr* result) { + absl::Status NewRandomAccessFile(const std::string& fname, + TransactionToken* token, + std::unique_ptr* result) { // We duplicate these methods due to Google internal coding style prevents // virtual functions with default arguments. See PR #41615. - return OkStatus(); + return absl::OkStatus(); } /// \brief Creates an object that writes to a new file with the specified @@ -150,12 +151,13 @@ class Env { /// The ownership of the returned WritableFile is passed to the caller /// and the object should be deleted when is not used. The file object /// shouldn't live longer than the Env object. - Status NewWritableFile(const std::string& fname, - std::unique_ptr* result); + absl::Status NewWritableFile(const std::string& fname, + std::unique_ptr* result); - Status NewWritableFile(const std::string& fname, TransactionToken* token, - std::unique_ptr* result) { - return OkStatus(); + absl::Status NewWritableFile(const std::string& fname, + TransactionToken* token, + std::unique_ptr* result) { + return absl::OkStatus(); } /// \brief Creates an object that either appends to an existing file, or @@ -170,12 +172,13 @@ class Env { /// The ownership of the returned WritableFile is passed to the caller /// and the object should be deleted when is not used. The file object /// shouldn't live longer than the Env object. 
- Status NewAppendableFile(const std::string& fname, - std::unique_ptr* result); + absl::Status NewAppendableFile(const std::string& fname, + std::unique_ptr* result); - Status NewAppendableFile(const std::string& fname, TransactionToken* token, - std::unique_ptr* result) { - return OkStatus(); + absl::Status NewAppendableFile(const std::string& fname, + TransactionToken* token, + std::unique_ptr* result) { + return absl::OkStatus(); } /// \brief Creates a readonly region of memory with the file context. /// @@ -188,30 +191,30 @@ class Env { /// The ownership of the returned ReadOnlyMemoryRegion is passed to the caller /// and the object should be deleted when is not used. The memory region /// object shouldn't live longer than the Env object. - Status NewReadOnlyMemoryRegionFromFile( + absl::Status NewReadOnlyMemoryRegionFromFile( const std::string& fname, std::unique_ptr* result); - Status NewReadOnlyMemoryRegionFromFile( + absl::Status NewReadOnlyMemoryRegionFromFile( const std::string& fname, TransactionToken* token, std::unique_ptr* result) { - return OkStatus(); + return absl::OkStatus(); } /// Returns OK if the named path exists and NOT_FOUND otherwise. - Status FileExists(const std::string& fname); + absl::Status FileExists(const std::string& fname); - Status FileExists(const std::string& fname, TransactionToken* token) { - return OkStatus(); + absl::Status FileExists(const std::string& fname, TransactionToken* token) { + return absl::OkStatus(); } /// Returns true if all the listed files exist, false otherwise. /// if status is not null, populate the vector with a detailed status /// for each file. bool FilesExist(const std::vector& files, - std::vector* status); + std::vector* status); bool FilesExist(const std::vector& files, TransactionToken* token, - std::vector* status) { + std::vector* status) { return true; } @@ -219,11 +222,11 @@ class Env { /// directory. The names are relative to "dir". /// /// Original contents of *results are dropped. - Status GetChildren(const std::string& dir, std::vector* result); + absl::Status GetChildren(const std::string& dir, std::vector* result); - Status GetChildren(const std::string& dir, TransactionToken* token, - std::vector* result) { - return OkStatus(); + absl::Status GetChildren(const std::string& dir, TransactionToken* token, + std::vector* result) { + return absl::OkStatus(); } /// \brief Returns true if the path matches the given pattern. The wildcards @@ -235,19 +238,20 @@ class Env { /// that pattern. *results is cleared. /// /// More details about `pattern` in FileSystem::GetMatchingPaths. - virtual Status GetMatchingPaths(const std::string& pattern, - std::vector* results); + virtual absl::Status GetMatchingPaths(const std::string& pattern, + std::vector* results); - Status GetMatchingPaths(const std::string& pattern, TransactionToken* token, - std::vector* results) { - return OkStatus(); + absl::Status GetMatchingPaths(const std::string& pattern, + TransactionToken* token, + std::vector* results) { + return absl::OkStatus(); } /// Deletes the named file. 
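For readers tracking the `Env` file-system surface specifically, callers are unaffected by the new return type. A short usage sketch (illustrative only; the scratch path is arbitrary and `tsl::Env::Default()` is assumed to be the usual accessor):

```
// Illustrative only: typical Env call sites before and after this change.
#include <iostream>
#include <string>
#include <vector>

#include "tsl/platform/env.h"

int main() {
  tsl::Env* env = tsl::Env::Default();
  const std::string dir = "/tmp/env_status_demo";  // hypothetical scratch path

  // Each call now returns absl::Status, but .ok()/ToString() usage is unchanged.
  absl::Status s = env->RecursivelyCreateDir(dir);
  if (!s.ok()) { std::cerr << s.ToString() << "\n"; return 1; }

  s = tsl::WriteStringToFile(env, dir + "/hello.txt", "hello");
  if (!s.ok()) { std::cerr << s.ToString() << "\n"; return 1; }

  std::vector<std::string> children;
  s = env->GetChildren(dir, &children);
  if (!s.ok()) { std::cerr << s.ToString() << "\n"; return 1; }
  for (const std::string& name : children) std::cout << name << "\n";

  return env->FileExists(dir + "/hello.txt").ok() ? 0 : 1;
}
```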
- Status DeleteFile(const std::string& fname); + absl::Status DeleteFile(const std::string& fname); - Status DeleteFile(const std::string& fname, TransactionToken* token) { - return OkStatus(); + absl::Status DeleteFile(const std::string& fname, TransactionToken* token) { + return absl::OkStatus(); } /// \brief Deletes the specified directory and all subdirectories and files @@ -274,12 +278,15 @@ class Env { /// * PERMISSION_DENIED - dirname or some descendant is not writable /// * UNIMPLEMENTED - Some underlying functions (like Delete) are not /// implemented - Status DeleteRecursively(const std::string& dirname, int64_t* undeleted_files, - int64_t* undeleted_dirs); + absl::Status DeleteRecursively(const std::string& dirname, + int64_t* undeleted_files, + int64_t* undeleted_dirs); - Status DeleteRecursively(const std::string& dirname, TransactionToken* token, - int64_t* undeleted_files, int64_t* undeleted_dirs) { - return OkStatus(); + absl::Status DeleteRecursively(const std::string& dirname, + TransactionToken* token, + int64_t* undeleted_files, + int64_t* undeleted_dirs) { + return absl::OkStatus(); } /// \brief Creates the specified directory and all the necessary @@ -287,35 +294,35 @@ class Env { /// * OK - successfully created the directory and sub directories, even if /// they were already created. /// * PERMISSION_DENIED - dirname or some subdirectory is not writable. - Status RecursivelyCreateDir(const std::string& dirname); + absl::Status RecursivelyCreateDir(const std::string& dirname); - Status RecursivelyCreateDir(const std::string& dirname, - TransactionToken* token) { - return OkStatus(); + absl::Status RecursivelyCreateDir(const std::string& dirname, + TransactionToken* token) { + return absl::OkStatus(); } /// \brief Creates the specified directory. Typical return codes /// * OK - successfully created the directory. /// * ALREADY_EXISTS - directory already exists. /// * PERMISSION_DENIED - dirname is not writable. - Status CreateDir(const std::string& dirname); + absl::Status CreateDir(const std::string& dirname); - Status CreateDir(const std::string& dirname, TransactionToken* token) { - return OkStatus(); + absl::Status CreateDir(const std::string& dirname, TransactionToken* token) { + return absl::OkStatus(); } /// Deletes the specified directory. - Status DeleteDir(const std::string& dirname); + absl::Status DeleteDir(const std::string& dirname); - Status DeleteDir(const std::string& dirname, TransactionToken* token) { - return OkStatus(); + absl::Status DeleteDir(const std::string& dirname, TransactionToken* token) { + return absl::OkStatus(); } /// Obtains statistics for the given path. - Status Stat(const std::string& fname, FileStatistics* stat); + absl::Status Stat(const std::string& fname, FileStatistics* stat); - Status Stat(const std::string& fname, TransactionToken* token, - FileStatistics* stat) { - return OkStatus(); + absl::Status Stat(const std::string& fname, TransactionToken* token, + FileStatistics* stat) { + return absl::OkStatus(); } /// \brief Returns whether the given path is a directory or not. @@ -325,7 +332,7 @@ class Env { /// * NOT_FOUND - The path entry does not exist. /// * PERMISSION_DENIED - Insufficient permissions. /// * UNIMPLEMENTED - The file factory doesn't support directories. - Status IsDirectory(const std::string& fname); + absl::Status IsDirectory(const std::string& fname); /// \brief Returns whether the given path is on a file system /// that has atomic move capabilities. 
This can be used @@ -337,63 +344,66 @@ class Env { /// so has_atomic_move holds the above information. /// * UNIMPLEMENTED - The file system of the path hasn't been implemented in /// TF - Status HasAtomicMove(const std::string& path, bool* has_atomic_move); + absl::Status HasAtomicMove(const std::string& path, bool* has_atomic_move); /// Stores the size of `fname` in `*file_size`. - Status GetFileSize(const std::string& fname, uint64* file_size); + absl::Status GetFileSize(const std::string& fname, uint64* file_size); - Status GetFileSize(const std::string& fname, TransactionToken* token, - uint64* file_size) { - return OkStatus(); + absl::Status GetFileSize(const std::string& fname, TransactionToken* token, + uint64* file_size) { + return absl::OkStatus(); } /// \brief Renames file src to target. If target already exists, it will be /// replaced. - Status RenameFile(const std::string& src, const std::string& target); + absl::Status RenameFile(const std::string& src, const std::string& target); - Status RenameFile(const std::string& src, const std::string& target, - TransactionToken* token) { - return OkStatus(); + absl::Status RenameFile(const std::string& src, const std::string& target, + TransactionToken* token) { + return absl::OkStatus(); } /// \brief Copy the src to target. - Status CopyFile(const std::string& src, const std::string& target); + absl::Status CopyFile(const std::string& src, const std::string& target); - Status CopyFile(const std::string& src, const std::string& target, - TransactionToken* token) { - return OkStatus(); + absl::Status CopyFile(const std::string& src, const std::string& target, + TransactionToken* token) { + return absl::OkStatus(); } /// \brief starts a new transaction on the filesystem that handles filename - Status StartTransaction(const std::string& filename, - TransactionToken** token) { + absl::Status StartTransaction(const std::string& filename, + TransactionToken** token) { *token = nullptr; - return OkStatus(); + return absl::OkStatus(); } /// \brief Adds `path` to transaction in `token` if token belongs to /// filesystem that handles the path. - Status AddToTransaction(const std::string& path, TransactionToken* token) { - return OkStatus(); + absl::Status AddToTransaction(const std::string& path, + TransactionToken* token) { + return absl::OkStatus(); } /// \brief Get token for `path` or start a new transaction and add `path` to /// it. - Status GetTokenOrStartTransaction(const std::string& path, - TransactionToken** token) { + absl::Status GetTokenOrStartTransaction(const std::string& path, + TransactionToken** token) { *token = nullptr; - return OkStatus(); + return absl::OkStatus(); } /// \brief Returns the transaction for `path` or nullptr in `token` - Status GetTransactionForPath(const std::string& path, - TransactionToken** token) { + absl::Status GetTransactionForPath(const std::string& path, + TransactionToken** token) { *token = nullptr; - return OkStatus(); + return absl::OkStatus(); } /// \brief Finalizes the transaction - Status EndTransaction(TransactionToken* token) { return OkStatus(); } + absl::Status EndTransaction(TransactionToken* token) { + return absl::OkStatus(); + } /// \brief Returns the absolute path of the current executable. It resolves /// symlinks if there is any. @@ -469,8 +479,8 @@ class Env { // OK from the function. // Otherwise returns nullptr in "*handle" and an error status from the // function. 
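The dynamic-library entry points change spelling as well. A hedged sketch of how they are typically driven, assuming these virtuals are reachable through `Env::Default()` as at other TSL call sites, and assuming a Linux `libm.so.6` is available; substitute any shared library on other platforms:

```
// Illustrative only: loading a shared library and resolving a symbol via Env.
#include <iostream>

#include "tsl/platform/env.h"

int main() {
  tsl::Env* env = tsl::Env::Default();

  void* handle = nullptr;
  // "libm.so.6" is an assumption (Linux); substitute any shared library.
  absl::Status s = env->LoadDynamicLibrary("libm.so.6", &handle);
  if (!s.ok()) { std::cerr << s.ToString() << "\n"; return 1; }

  void* sym = nullptr;
  s = env->GetSymbolFromLibrary(handle, "cos", &sym);
  if (!s.ok()) { std::cerr << s.ToString() << "\n"; return 1; }

  using CosFn = double (*)(double);
  std::cout << reinterpret_cast<CosFn>(sym)(0.0) << "\n";  // prints 1
  return 0;
}
```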
- virtual Status LoadDynamicLibrary(const char* library_filename, - void** handle) = 0; + virtual absl::Status LoadDynamicLibrary(const char* library_filename, + void** handle) = 0; // \brief Get a pointer to a symbol from a dynamic library. // @@ -478,8 +488,9 @@ class Env { // On success, store a pointer to the located symbol in "*symbol" and return // OK from the function. Otherwise, returns nullptr in "*symbol" and an error // status from the function. - virtual Status GetSymbolFromLibrary(void* handle, const char* symbol_name, - void** symbol) = 0; + virtual absl::Status GetSymbolFromLibrary(void* handle, + const char* symbol_name, + void** symbol) = 0; // \brief build the name of dynamic library. // @@ -511,17 +522,18 @@ class EnvWrapper : public Env { /// Returns the target to which this Env forwards all calls Env* target() const { return target_; } - Status GetFileSystemForFile(const std::string& fname, - FileSystem** result) override { + absl::Status GetFileSystemForFile(const std::string& fname, + FileSystem** result) override { return target_->GetFileSystemForFile(fname, result); } - Status GetRegisteredFileSystemSchemes(std::vector* schemes) override { + absl::Status GetRegisteredFileSystemSchemes( + std::vector* schemes) override { return target_->GetRegisteredFileSystemSchemes(schemes); } - Status RegisterFileSystem(const std::string& scheme, - FileSystemRegistry::Factory factory) override { + absl::Status RegisterFileSystem( + const std::string& scheme, FileSystemRegistry::Factory factory) override { return target_->RegisterFileSystem(scheme, factory); } @@ -549,12 +561,12 @@ class EnvWrapper : public Env { absl::AnyInvocable closure) override { target_->SchedClosureAfter(micros, std::move(closure)); } - Status LoadDynamicLibrary(const char* library_filename, - void** handle) override { + absl::Status LoadDynamicLibrary(const char* library_filename, + void** handle) override { return target_->LoadDynamicLibrary(library_filename, handle); } - Status GetSymbolFromLibrary(void* handle, const char* symbol_name, - void** symbol) override { + absl::Status GetSymbolFromLibrary(void* handle, const char* symbol_name, + void** symbol) override { return target_->GetSymbolFromLibrary(handle, symbol_name, symbol); } std::string FormatLibraryFileName(const std::string& name, @@ -608,49 +620,53 @@ struct ThreadOptions { /// A utility routine: copy contents of `src` in file system `src_fs` /// to `target` in file system `target_fs`. -Status FileSystemCopyFile(FileSystem* src_fs, const std::string& src, - FileSystem* target_fs, const std::string& target); +absl::Status FileSystemCopyFile(FileSystem* src_fs, const std::string& src, + FileSystem* target_fs, + const std::string& target); /// A utility routine: reads contents of named file into `*data` -Status ReadFileToString(Env* env, const std::string& fname, std::string* data); +absl::Status ReadFileToString(Env* env, const std::string& fname, + std::string* data); /// A utility routine: write contents of `data` to file named `fname` /// (overwriting existing contents, if any). -Status WriteStringToFile(Env* env, const std::string& fname, - const StringPiece& data); +absl::Status WriteStringToFile(Env* env, const std::string& fname, + const StringPiece& data); /// Write binary representation of "proto" to the named file. 
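The proto helpers declared below keep their contract and only change the returned type. An illustrative round trip; `tensorflow::GraphDef` is used here purely as a convenient concrete proto and the path is arbitrary:

```
// Illustrative only: writing and re-reading a binary proto through the
// tsl helpers, whose return type is now absl::Status.
#include <iostream>
#include <string>

#include "tensorflow/core/framework/graph.pb.h"  // any concrete proto works
#include "tsl/platform/env.h"

int main() {
  tsl::Env* env = tsl::Env::Default();
  const std::string path = "/tmp/graph_demo.pb";  // hypothetical path

  tensorflow::GraphDef graph;
  graph.add_node()->set_name("demo_node");

  absl::Status s = tsl::WriteBinaryProto(env, path, graph);
  if (!s.ok()) { std::cerr << s.ToString() << "\n"; return 1; }

  tensorflow::GraphDef restored;
  s = tsl::ReadBinaryProto(env, path, &restored);
  if (!s.ok()) { std::cerr << s.ToString() << "\n"; return 1; }

  std::cout << restored.node(0).name() << "\n";  // prints demo_node
  return 0;
}
```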
-Status WriteBinaryProto(Env* env, const std::string& fname, - const protobuf::MessageLite& proto); +absl::Status WriteBinaryProto(Env* env, const std::string& fname, + const protobuf::MessageLite& proto); /// Reads contents of named file and parse as binary encoded proto data /// and store into `*proto`. -Status ReadBinaryProto(Env* env, const std::string& fname, - protobuf::MessageLite* proto); +absl::Status ReadBinaryProto(Env* env, const std::string& fname, + protobuf::MessageLite* proto); /// Write the text representation of "proto" to the named file. -inline Status WriteTextProto(Env* /* env */, const std::string& /* fname */, - const protobuf::MessageLite& /* proto */) { +inline absl::Status WriteTextProto(Env* /* env */, + const std::string& /* fname */, + const protobuf::MessageLite& /* proto */) { return errors::Unimplemented("Can't write text protos with protolite."); } -Status WriteTextProto(Env* env, const std::string& fname, - const protobuf::Message& proto); +absl::Status WriteTextProto(Env* env, const std::string& fname, + const protobuf::Message& proto); /// Read contents of named file and parse as text encoded proto data /// and store into `*proto`. -inline Status ReadTextProto(Env* /* env */, const std::string& /* fname */, - protobuf::MessageLite* /* proto */) { +inline absl::Status ReadTextProto(Env* /* env */, + const std::string& /* fname */, + protobuf::MessageLite* /* proto */) { return errors::Unimplemented("Can't parse text protos with protolite."); } -Status ReadTextProto(Env* env, const std::string& fname, - protobuf::Message* proto); +absl::Status ReadTextProto(Env* env, const std::string& fname, + protobuf::Message* proto); /// Read contents of named file and parse as either text or binary encoded proto /// data and store into `*proto`. 
-Status ReadTextOrBinaryProto(Env* env, const std::string& fname, - protobuf::Message* proto); -Status ReadTextOrBinaryProto(Env* env, const std::string& fname, - protobuf::MessageLite* proto); +absl::Status ReadTextOrBinaryProto(Env* env, const std::string& fname, + protobuf::Message* proto); +absl::Status ReadTextOrBinaryProto(Env* env, const std::string& fname, + protobuf::MessageLite* proto); // START_SKIP_DOXYGEN diff --git a/third_party/xla/third_party/tsl/tsl/platform/errors.cc b/third_party/xla/third_party/tsl/tsl/platform/errors.cc index 7f61fb678954ed..6c732a47849113 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/errors.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/errors.cc @@ -175,72 +175,73 @@ absl::StatusCode ErrnoToCode(int err_number) { } // namespace -Status IOError(const string& context, int err_number) { +absl::Status IOError(const string& context, int err_number) { auto code = ErrnoToCode(err_number); - return Status(code, strings::StrCat(context, "; ", strerror(err_number))); + return absl::Status(code, + strings::StrCat(context, "; ", strerror(err_number))); } -bool IsAborted(const Status& status) { +bool IsAborted(const absl::Status& status) { return status.code() == tsl::error::Code::ABORTED; } -bool IsAlreadyExists(const Status& status) { +bool IsAlreadyExists(const absl::Status& status) { return status.code() == tsl::error::Code::ALREADY_EXISTS; } -bool IsCancelled(const Status& status) { +bool IsCancelled(const absl::Status& status) { return status.code() == tsl::error::Code::CANCELLED; } -bool IsDataLoss(const Status& status) { +bool IsDataLoss(const absl::Status& status) { return status.code() == tsl::error::Code::DATA_LOSS; } -bool IsDeadlineExceeded(const Status& status) { +bool IsDeadlineExceeded(const absl::Status& status) { return status.code() == tsl::error::Code::DEADLINE_EXCEEDED; } -bool IsFailedPrecondition(const Status& status) { +bool IsFailedPrecondition(const absl::Status& status) { return status.code() == tsl::error::Code::FAILED_PRECONDITION; } -bool IsInternal(const Status& status) { +bool IsInternal(const absl::Status& status) { return status.code() == tsl::error::Code::INTERNAL; } -bool IsInvalidArgument(const Status& status) { +bool IsInvalidArgument(const absl::Status& status) { return status.code() == tsl::error::Code::INVALID_ARGUMENT; } -bool IsNotFound(const Status& status) { +bool IsNotFound(const absl::Status& status) { return status.code() == tsl::error::Code::NOT_FOUND; } -bool IsOutOfRange(const Status& status) { +bool IsOutOfRange(const absl::Status& status) { return status.code() == tsl::error::Code::OUT_OF_RANGE; } -bool IsPermissionDenied(const Status& status) { +bool IsPermissionDenied(const absl::Status& status) { return status.code() == tsl::error::Code::PERMISSION_DENIED; } -bool IsResourceExhausted(const Status& status) { +bool IsResourceExhausted(const absl::Status& status) { return status.code() == tsl::error::Code::RESOURCE_EXHAUSTED; } -bool IsUnauthenticated(const Status& status) { +bool IsUnauthenticated(const absl::Status& status) { return status.code() == tsl::error::Code::UNAUTHENTICATED; } -bool IsUnavailable(const Status& status) { +bool IsUnavailable(const absl::Status& status) { return status.code() == tsl::error::Code::UNAVAILABLE; } -bool IsUnimplemented(const Status& status) { +bool IsUnimplemented(const absl::Status& status) { return status.code() == tsl::error::Code::UNIMPLEMENTED; } -bool IsUnknown(const Status& status) { +bool IsUnknown(const absl::Status& status) { return 
status.code() == tsl::error::Code::UNKNOWN; } diff --git a/third_party/xla/third_party/tsl/tsl/platform/errors.h b/third_party/xla/third_party/tsl/tsl/platform/errors.h index 9008dedad8270c..1d86af35cd3efd 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/errors.h +++ b/third_party/xla/third_party/tsl/tsl/platform/errors.h @@ -88,11 +88,11 @@ inline const strings::AlphaNum& PrepareForStrCat(const strings::AlphaNum& a) { } // namespace internal // Maps UNIX errors into a Status. -Status IOError(const string& context, int err_number); +absl::Status IOError(const string& context, int err_number); // Returns all payloads from a Status as a key-value map. inline std::unordered_map GetPayloads( - const ::tsl::Status& status) { + const absl::Status& status) { std::unordered_map payloads; status.ForEachPayload( [&payloads](::tsl::StringPiece key, const absl::Cord& value) { @@ -104,7 +104,7 @@ inline std::unordered_map GetPayloads( // Inserts all given payloads into the given status. Will overwrite existing // payloads if they exist with the same key. inline void InsertPayloads( - ::tsl::Status& status, + absl::Status& status, const std::unordered_map& payloads) { for (const auto& payload : payloads) { status.SetPayload(payload.first, absl::Cord(payload.second)); @@ -113,7 +113,7 @@ inline void InsertPayloads( // Copies all payloads from one Status to another. Will overwrite existing // payloads in the destination if they exist with the same key. -inline void CopyPayloads(const ::tsl::Status& from, ::tsl::Status& to) { +inline void CopyPayloads(const absl::Status& from, absl::Status& to) { from.ForEachPayload([&to](::tsl::StringPiece key, const absl::Cord& value) { to.SetPayload(key, value); }); @@ -121,22 +121,22 @@ inline void CopyPayloads(const ::tsl::Status& from, ::tsl::Status& to) { #if defined(PLATFORM_GOOGLE) // Creates a new status with the given code, message and payloads. -inline ::tsl::Status Create( +inline absl::Status Create( absl::StatusCode code, ::tsl::StringPiece message, const std::unordered_map& payloads, absl::SourceLocation loc = absl::SourceLocation::current()) { - Status status(code, message, loc); + absl::Status status(code, message, loc); InsertPayloads(status, payloads); return status; } // Returns a new Status, replacing its message with the given. -inline ::tsl::Status CreateWithUpdatedMessage(const ::tsl::Status& status, - ::tsl::StringPiece message) { +inline absl::Status CreateWithUpdatedMessage(const absl::Status& status, + ::tsl::StringPiece message) { auto locations = status.GetSourceLocations(); auto initial_loc = locations.empty() ? absl::SourceLocation::current() : locations[0]; - Status new_status = Create(static_cast(status.code()), - message, GetPayloads(status), initial_loc); + absl::Status new_status = Create(static_cast(status.code()), + message, GetPayloads(status), initial_loc); if (locations.size() > 1) { for (auto loc : locations.subspan(1)) { new_status.AddSourceLocation(loc); @@ -165,7 +165,7 @@ inline ::tsl::Status CreateWithUpdatedMessage(const ::tsl::Status& status, // context put it on a new line, since it is possible for there // to be several layers of additional context. template -void AppendToMessage(::tsl::Status* status, Args... args) { +void AppendToMessage(absl::Status* status, Args... args) { auto new_status = CreateWithUpdatedMessage( *status, ::tsl::strings::StrCat(status->message(), "\n\t", args...)); CopyPayloads(*status, new_status); @@ -199,13 +199,13 @@ void AppendToMessage(::tsl::Status* status, Args... 
args) { // CANCELLED template -::tsl::Status Cancelled(Args... args) { - return ::tsl::Status(absl::StatusCode::kCancelled, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status Cancelled(Args... args) { + return absl::Status(absl::StatusCode::kCancelled, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status CancelledWithPayloads( +absl::Status CancelledWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kCancelled, message, payloads); @@ -213,10 +213,10 @@ ::tsl::Status CancelledWithPayloads( // InvalidArgument template -::tsl::Status InvalidArgument(Args... args) { - return ::tsl::Status(absl::StatusCode::kInvalidArgument, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status InvalidArgument(Args... args) { + return absl::Status(absl::StatusCode::kInvalidArgument, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } #if defined(PLATFORM_GOOGLE) @@ -225,7 +225,7 @@ template ::absl::Status InvalidArgument( Arg1 arg1, Arg2 arg2, Arg3 arg3, Arg4 arg4, absl::SourceLocation loc = absl::SourceLocation::current()) { - return ::tsl::Status( + return absl::Status( absl::StatusCode::kInvalidArgument, ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), ::tsl::errors::internal::PrepareForStrCat(arg2), @@ -237,7 +237,7 @@ template ::absl::Status InvalidArgument( Arg1 arg1, Arg2 arg2, Arg3 arg3, absl::SourceLocation loc = absl::SourceLocation::current()) { - return ::tsl::Status( + return absl::Status( absl::StatusCode::kInvalidArgument, ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), ::tsl::errors::internal::PrepareForStrCat(arg2), @@ -248,7 +248,7 @@ template ::absl::Status InvalidArgument( Arg1 arg1, Arg2 arg2, absl::SourceLocation loc = absl::SourceLocation::current()) { - return ::tsl::Status( + return absl::Status( absl::StatusCode::kInvalidArgument, ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), ::tsl::errors::internal::PrepareForStrCat(arg2)), @@ -257,7 +257,7 @@ ::absl::Status InvalidArgument( template ::absl::Status InvalidArgument( Arg1 arg1, absl::SourceLocation loc = absl::SourceLocation::current()) { - return ::tsl::Status( + return absl::Status( absl::StatusCode::kInvalidArgument, ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1)), loc); @@ -302,10 +302,10 @@ ::absl::Status InvalidArgumentWithPayloads( // NotFound template -::tsl::Status NotFound(Args... args) { - return ::tsl::Status(absl::StatusCode::kNotFound, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status NotFound(Args... args) { + return absl::Status(absl::StatusCode::kNotFound, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } #if defined(PLATFORM_GOOGLE) // Specialized overloads to capture source location for up to three arguments. 
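A minimal usage sketch (not part of the patch) of how call sites look once these constructor helpers return absl::Status directly; the helpers keep their old spellings, only the declared return type changes, and tsl::Status already aliases absl::Status. LoadConfig and Example are hypothetical names used only for illustration, and the include path assumes the usual TSL layout.

#include <iostream>
#include <string>

#include "absl/status/status.h"
#include "tsl/platform/errors.h"

// Hypothetical caller: builds an error through the tsl::errors helper, which
// after this patch is declared to return absl::Status.
absl::Status LoadConfig(const std::string& path) {
  if (path.empty()) {
    return tsl::errors::InvalidArgument("empty path for config file");
  }
  return absl::OkStatus();
}

void Example() {
  absl::Status s = LoadConfig("");
  // The Is*() predicates now take absl::Status, so statuses produced by the
  // tsl::errors helpers and plain Abseil statuses are checked the same way.
  if (tsl::errors::IsInvalidArgument(s)) {
    std::cerr << s.message() << "\n";
  }
}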
@@ -313,7 +313,7 @@ template ::absl::Status NotFound( Arg1 arg1, Arg2 arg2, Arg3 arg3, absl::SourceLocation loc = absl::SourceLocation::current()) { - return ::tsl::Status( + return absl::Status( absl::StatusCode::kNotFound, ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), ::tsl::errors::internal::PrepareForStrCat(arg2), @@ -324,7 +324,7 @@ template ::absl::Status NotFound( Arg1 arg1, Arg2 arg2, absl::SourceLocation loc = absl::SourceLocation::current()) { - return ::tsl::Status( + return absl::Status( absl::StatusCode::kNotFound, ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1), ::tsl::errors::internal::PrepareForStrCat(arg2)), @@ -333,7 +333,7 @@ ::absl::Status NotFound( template ::absl::Status NotFound( Arg1 arg1, absl::SourceLocation loc = absl::SourceLocation::current()) { - return ::tsl::Status( + return absl::Status( absl::StatusCode::kNotFound, ::tsl::strings::StrCat(::tsl::errors::internal::PrepareForStrCat(arg1)), loc); @@ -377,13 +377,13 @@ ::absl::Status NotFoundWithPayloads( // AlreadyExists template -::tsl::Status AlreadyExists(Args... args) { - return ::tsl::Status(absl::StatusCode::kAlreadyExists, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status AlreadyExists(Args... args) { + return absl::Status(absl::StatusCode::kAlreadyExists, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status AlreadyExistsWithPayloads( +absl::Status AlreadyExistsWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kAlreadyExists, message, payloads); @@ -391,13 +391,13 @@ ::tsl::Status AlreadyExistsWithPayloads( // ResourceExhausted template -::tsl::Status ResourceExhausted(Args... args) { - return ::tsl::Status(absl::StatusCode::kResourceExhausted, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status ResourceExhausted(Args... args) { + return absl::Status(absl::StatusCode::kResourceExhausted, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status ResourceExhaustedWithPayloads( +absl::Status ResourceExhaustedWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kResourceExhausted, message, @@ -406,13 +406,13 @@ ::tsl::Status ResourceExhaustedWithPayloads( // Unavailable template -::tsl::Status Unavailable(Args... args) { - return ::tsl::Status(absl::StatusCode::kUnavailable, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status Unavailable(Args... args) { + return absl::Status(absl::StatusCode::kUnavailable, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status UnavailableWithPayloads( +absl::Status UnavailableWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kUnavailable, message, payloads); @@ -420,13 +420,13 @@ ::tsl::Status UnavailableWithPayloads( // FailedPrecondition template -::tsl::Status FailedPrecondition(Args... args) { - return ::tsl::Status(absl::StatusCode::kFailedPrecondition, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status FailedPrecondition(Args... 
args) { + return absl::Status(absl::StatusCode::kFailedPrecondition, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status FailedPreconditionWithPayloads( +absl::Status FailedPreconditionWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kFailedPrecondition, message, @@ -435,13 +435,13 @@ ::tsl::Status FailedPreconditionWithPayloads( // OutOfRange template -::tsl::Status OutOfRange(Args... args) { - return ::tsl::Status(absl::StatusCode::kOutOfRange, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status OutOfRange(Args... args) { + return absl::Status(absl::StatusCode::kOutOfRange, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status OutOfRangeWithPayloads( +absl::Status OutOfRangeWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kOutOfRange, message, payloads); @@ -449,13 +449,13 @@ ::tsl::Status OutOfRangeWithPayloads( // Unimplemented template -::tsl::Status Unimplemented(Args... args) { - return ::tsl::Status(absl::StatusCode::kUnimplemented, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status Unimplemented(Args... args) { + return absl::Status(absl::StatusCode::kUnimplemented, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status UnimplementedWithPayloads( +absl::Status UnimplementedWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kUnimplemented, message, payloads); @@ -463,13 +463,13 @@ ::tsl::Status UnimplementedWithPayloads( // Internal template -::tsl::Status Internal(Args... args) { - return ::tsl::Status(absl::StatusCode::kInternal, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status Internal(Args... args) { + return absl::Status(absl::StatusCode::kInternal, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status InternalWithPayloads( +absl::Status InternalWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kInternal, message, payloads); @@ -477,13 +477,13 @@ ::tsl::Status InternalWithPayloads( // Aborted template -::tsl::Status Aborted(Args... args) { - return ::tsl::Status(absl::StatusCode::kAborted, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status Aborted(Args... args) { + return absl::Status(absl::StatusCode::kAborted, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status AbortedWithPayloads( +absl::Status AbortedWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kAborted, message, payloads); @@ -491,13 +491,13 @@ ::tsl::Status AbortedWithPayloads( // DeadlineExceeded template -::tsl::Status DeadlineExceeded(Args... args) { - return ::tsl::Status(absl::StatusCode::kDeadlineExceeded, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status DeadlineExceeded(Args... 
args) { + return absl::Status(absl::StatusCode::kDeadlineExceeded, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status DeadlineExceededWithPayloads( +absl::Status DeadlineExceededWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kDeadlineExceeded, message, payloads); @@ -505,13 +505,13 @@ ::tsl::Status DeadlineExceededWithPayloads( // DataLoss template -::tsl::Status DataLoss(Args... args) { - return ::tsl::Status(absl::StatusCode::kDataLoss, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status DataLoss(Args... args) { + return absl::Status(absl::StatusCode::kDataLoss, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status DataLossWithPayloads( +absl::Status DataLossWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kDataLoss, message, payloads); @@ -519,26 +519,26 @@ ::tsl::Status DataLossWithPayloads( // Unknown template -::tsl::Status Unknown(Args... args) { - return ::tsl::Status(absl::StatusCode::kUnknown, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status Unknown(Args... args) { + return absl::Status(absl::StatusCode::kUnknown, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status UnknownPayloads( +absl::Status UnknownPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kUnknown, message, payloads); } // PermissionDenied template -::tsl::Status PermissionDenied(Args... args) { - return ::tsl::Status(absl::StatusCode::kPermissionDenied, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status PermissionDenied(Args... args) { + return absl::Status(absl::StatusCode::kPermissionDenied, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status PermissionDeniedWithPayloads( +absl::Status PermissionDeniedWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kPermissionDenied, message, payloads); @@ -546,34 +546,34 @@ ::tsl::Status PermissionDeniedWithPayloads( // Unauthenticated template -::tsl::Status Unauthenticated(Args... args) { - return ::tsl::Status(absl::StatusCode::kUnauthenticated, - ::tsl::strings::StrCat( - ::tsl::errors::internal::PrepareForStrCat(args)...)); +absl::Status Unauthenticated(Args... 
args) { + return absl::Status(absl::StatusCode::kUnauthenticated, + ::tsl::strings::StrCat( + ::tsl::errors::internal::PrepareForStrCat(args)...)); } template -::tsl::Status UnauthenticatedWithPayloads( +absl::Status UnauthenticatedWithPayloads( const ::tsl::StringPiece& message, const std::unordered_map& payloads) { return errors::Create(absl::StatusCode::kUnauthenticated, message, payloads); } -bool IsAborted(const Status& status); -bool IsAlreadyExists(const Status& status); -bool IsCancelled(const Status& status); -bool IsDataLoss(const Status& status); -bool IsDeadlineExceeded(const Status& status); -bool IsFailedPrecondition(const Status& status); -bool IsInternal(const Status& status); -bool IsInvalidArgument(const Status& status); -bool IsNotFound(const Status& status); -bool IsOutOfRange(const Status& status); -bool IsPermissionDenied(const Status& status); -bool IsResourceExhausted(const Status& status); -bool IsUnauthenticated(const Status& status); -bool IsUnavailable(const Status& status); -bool IsUnimplemented(const Status& status); -bool IsUnknown(const Status& status); +bool IsAborted(const absl::Status& status); +bool IsAlreadyExists(const absl::Status& status); +bool IsCancelled(const absl::Status& status); +bool IsDataLoss(const absl::Status& status); +bool IsDeadlineExceeded(const absl::Status& status); +bool IsFailedPrecondition(const absl::Status& status); +bool IsInternal(const absl::Status& status); +bool IsInvalidArgument(const absl::Status& status); +bool IsNotFound(const absl::Status& status); +bool IsOutOfRange(const absl::Status& status); +bool IsPermissionDenied(const absl::Status& status); +bool IsResourceExhausted(const absl::Status& status); +bool IsUnauthenticated(const absl::Status& status); +bool IsUnavailable(const absl::Status& status); +bool IsUnimplemented(const absl::Status& status); +bool IsUnknown(const absl::Status& status); // Produces a formatted string pattern from the name which can uniquely identify // this node upstream to produce an informative error message. The pattern @@ -610,10 +610,10 @@ inline std::string FormatFunctionForError(absl::string_view name) { return strings::StrCat("{{function_node ", name, "}}"); } -inline Status ReplaceErrorFromNonCommunicationOps(const Status s, - absl::string_view op_name) { +inline absl::Status ReplaceErrorFromNonCommunicationOps( + const absl::Status s, absl::string_view op_name) { assert(::tsl::errors::IsUnavailable(s)); - return Status( + return absl::Status( absl::StatusCode::kInternal, strings::StrCat( s.message(), "\nExecuting non-communication op <", op_name, diff --git a/third_party/xla/third_party/tsl/tsl/platform/errors_test.cc b/third_party/xla/third_party/tsl/tsl/platform/errors_test.cc index 3607b90c2d7b3e..88a3a5a78f72a5 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/errors_test.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/errors_test.cc @@ -21,7 +21,7 @@ limitations under the License. 
namespace tsl { TEST(AppendToMessageTest, PayloadsAreCopied) { - Status status = errors::Aborted("Aborted Error Message"); + absl::Status status = errors::Aborted("Aborted Error Message"); status.SetPayload("payload_key", absl::Cord("payload_value")); errors::AppendToMessage(&status, "Appended Message"); @@ -30,22 +30,22 @@ TEST(AppendToMessageTest, PayloadsAreCopied) { } TEST(Status, GetAllPayloads) { - Status s_error(absl::StatusCode::kInternal, "Error message"); + absl::Status s_error(absl::StatusCode::kInternal, "Error message"); s_error.SetPayload("Error key", absl::Cord("foo")); auto payloads_error_status = errors::GetPayloads(s_error); ASSERT_EQ(payloads_error_status.size(), 1); ASSERT_EQ(payloads_error_status["Error key"], "foo"); - Status s_ok = Status(); + absl::Status s_ok = absl::Status(); auto payloads_ok_status = errors::GetPayloads(s_ok); ASSERT_TRUE(payloads_ok_status.empty()); } TEST(Status, OKStatusInsertPayloadsFromErrorStatus) { // An OK status will should not change after InsertPayloads() calls. - Status s_error(absl::StatusCode::kInternal, "Error message"); + absl::Status s_error(absl::StatusCode::kInternal, "Error message"); s_error.SetPayload("Error key", absl::Cord("foo")); - Status s_ok = Status(); + absl::Status s_ok = absl::Status(); errors::InsertPayloads(s_ok, errors::GetPayloads(s_error)); auto payloads_ok_status = errors::GetPayloads(s_ok); @@ -54,19 +54,19 @@ TEST(Status, OKStatusInsertPayloadsFromErrorStatus) { TEST(Status, ErrorStatusInsertPayloadsFromOKStatus) { // An InsertPayloads() call should not take effect from empty inputs. - Status s_error(absl::StatusCode::kInternal, "Error message"); + absl::Status s_error(absl::StatusCode::kInternal, "Error message"); s_error.SetPayload("Error key", absl::Cord("foo")); - Status s_ok = Status(); + absl::Status s_ok = absl::Status(); errors::InsertPayloads(s_error, errors::GetPayloads(s_ok)); ASSERT_EQ(s_error.GetPayload("Error key"), "foo"); } TEST(Status, ErrorStatusInsertPayloadsFromErrorStatus) { - Status s_error1(absl::StatusCode::kInternal, "Error message"); + absl::Status s_error1(absl::StatusCode::kInternal, "Error message"); s_error1.SetPayload("Error key 1", absl::Cord("foo")); s_error1.SetPayload("Error key 2", absl::Cord("bar")); - Status s_error2(absl::StatusCode::kInternal, "Error message"); + absl::Status s_error2(absl::StatusCode::kInternal, "Error message"); s_error2.SetPayload("Error key", absl::Cord("bar")); ASSERT_EQ(s_error2.GetPayload("Error key"), "bar"); @@ -79,22 +79,22 @@ TEST(Status, ErrorStatusInsertPayloadsFromErrorStatus) { #if defined(PLATFORM_GOOGLE) -Status GetError() { +absl::Status GetError() { return absl::InvalidArgumentError("An invalid argument error"); } -Status PropagateError() { +absl::Status PropagateError() { TF_RETURN_IF_ERROR(GetError()); return absl::OkStatus(); } -Status PropagateError2() { +absl::Status PropagateError2() { TF_RETURN_IF_ERROR(PropagateError()); return absl::OkStatus(); } TEST(Status, StackTracePropagation) { - Status s = PropagateError2(); + absl::Status s = PropagateError2(); auto sources = s.GetSourceLocations(); ASSERT_EQ(sources.size(), 3); @@ -105,16 +105,16 @@ TEST(Status, StackTracePropagation) { } TEST(Status, SourceLocationsPreservedByAppend) { - Status s = PropagateError2(); + absl::Status s = PropagateError2(); ASSERT_EQ(s.GetSourceLocations().size(), 3); errors::AppendToMessage(&s, "A new message."); ASSERT_EQ(s.GetSourceLocations().size(), 3); } TEST(Status, SourceLocationsPreservedByUpdate) { - Status s = PropagateError2(); + 
absl::Status s = PropagateError2(); ASSERT_EQ(s.GetSourceLocations().size(), 3); - Status s2 = errors::CreateWithUpdatedMessage(s, "New message."); + absl::Status s2 = errors::CreateWithUpdatedMessage(s, "New message."); ASSERT_EQ(s2.GetSourceLocations().size(), 3); } diff --git a/third_party/xla/third_party/tsl/tsl/platform/file_system.cc b/third_party/xla/third_party/tsl/tsl/platform/file_system.cc index 022782ee47c5e7..cbca921d50b545 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/file_system.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/file_system.cc @@ -74,31 +74,33 @@ string FileSystem::TranslateName(const string& name) const { return this->CleanPath(path); } -Status FileSystem::IsDirectory(const string& name, TransactionToken* token) { +absl::Status FileSystem::IsDirectory(const string& name, + TransactionToken* token) { // Check if path exists. // TODO(sami):Forward token to other methods once migration is complete. TF_RETURN_IF_ERROR(FileExists(name)); FileStatistics stat; TF_RETURN_IF_ERROR(Stat(name, &stat)); if (stat.is_directory) { - return OkStatus(); + return absl::OkStatus(); } - return Status(absl::StatusCode::kFailedPrecondition, "Not a directory"); + return absl::Status(absl::StatusCode::kFailedPrecondition, "Not a directory"); } -Status FileSystem::HasAtomicMove(const string& path, bool* has_atomic_move) { +absl::Status FileSystem::HasAtomicMove(const string& path, + bool* has_atomic_move) { *has_atomic_move = true; - return OkStatus(); + return absl::OkStatus(); } void FileSystem::FlushCaches(TransactionToken* token) {} bool FileSystem::FilesExist(const std::vector& files, TransactionToken* token, - std::vector* status) { + std::vector* status) { bool result = true; for (const auto& file : files) { - Status s = FileExists(file); + absl::Status s = FileExists(file); result &= s.ok(); if (status != nullptr) { status->push_back(s); @@ -110,17 +112,17 @@ bool FileSystem::FilesExist(const std::vector& files, return result; } -Status FileSystem::DeleteRecursively(const string& dirname, - TransactionToken* token, - int64_t* undeleted_files, - int64_t* undeleted_dirs) { +absl::Status FileSystem::DeleteRecursively(const string& dirname, + TransactionToken* token, + int64_t* undeleted_files, + int64_t* undeleted_dirs) { CHECK_NOTNULL(undeleted_files); CHECK_NOTNULL(undeleted_dirs); *undeleted_files = 0; *undeleted_dirs = 0; // Make sure that dirname exists; - Status exists_status = FileExists(dirname); + absl::Status exists_status = FileExists(dirname); if (!exists_status.ok()) { (*undeleted_dirs)++; return exists_status; @@ -128,7 +130,7 @@ Status FileSystem::DeleteRecursively(const string& dirname, // If given path to a single file, we should just delete it. if (!IsDirectory(dirname).ok()) { - Status delete_root_status = DeleteFile(dirname); + absl::Status delete_root_status = DeleteFile(dirname); if (!delete_root_status.ok()) (*undeleted_files)++; return delete_root_status; } @@ -136,7 +138,7 @@ Status FileSystem::DeleteRecursively(const string& dirname, std::deque dir_q; // Queue for the BFS std::vector dir_list; // List of all dirs discovered dir_q.push_back(dirname); - Status ret; // Status to be returned. + absl::Status ret; // Status to be returned. // Do a BFS on the directory to discover all the sub-directories. Remove all // children that are files along the way. 
Then cleanup and remove the // directories in reverse order.; @@ -146,7 +148,7 @@ Status FileSystem::DeleteRecursively(const string& dirname, dir_list.push_back(dir); std::vector children; // GetChildren might fail if we don't have appropriate permissions. - Status s = GetChildren(dir, &children); + absl::Status s = GetChildren(dir, &children); ret.Update(s); if (!s.ok()) { (*undeleted_dirs)++; @@ -160,7 +162,7 @@ Status FileSystem::DeleteRecursively(const string& dirname, } else { // Delete file might fail because of permissions issues or might be // unimplemented. - Status del_status = DeleteFile(child_path); + absl::Status del_status = DeleteFile(child_path); ret.Update(del_status); if (!del_status.ok()) { (*undeleted_files)++; @@ -174,7 +176,7 @@ Status FileSystem::DeleteRecursively(const string& dirname, for (const string& dir : dir_list) { // Delete dir might fail because of permissions issues or might be // unimplemented. - Status s = DeleteDir(dir); + absl::Status s = DeleteDir(dir); ret.Update(s); if (!s.ok()) { (*undeleted_dirs)++; @@ -183,19 +185,19 @@ Status FileSystem::DeleteRecursively(const string& dirname, return ret; } -Status FileSystem::RecursivelyCreateDir(const string& dirname, - TransactionToken* token) { +absl::Status FileSystem::RecursivelyCreateDir(const string& dirname, + TransactionToken* token) { StringPiece scheme, host, remaining_dir; this->ParseURI(dirname, &scheme, &host, &remaining_dir); std::vector sub_dirs; while (!remaining_dir.empty()) { std::string current_entry = this->CreateURI(scheme, host, remaining_dir); - Status exists_status = FileExists(current_entry); + absl::Status exists_status = FileExists(current_entry); if (exists_status.ok()) { // FileExists cannot differentiate between existence of a file or a // directory, hence we need an additional test as we must not assume that // a path to a file is a path to a parent directory. - Status directory_status = IsDirectory(current_entry); + absl::Status directory_status = IsDirectory(current_entry); if (directory_status.ok()) { break; // We need to start creating directories from here. } else if (directory_status.code() == absl::StatusCode::kUnimplemented) { @@ -221,16 +223,16 @@ Status FileSystem::RecursivelyCreateDir(const string& dirname, string built_path(remaining_dir); for (const StringPiece sub_dir : sub_dirs) { built_path = this->JoinPath(built_path, sub_dir); - Status status = CreateDir(this->CreateURI(scheme, host, built_path)); + absl::Status status = CreateDir(this->CreateURI(scheme, host, built_path)); if (!status.ok() && status.code() != absl::StatusCode::kAlreadyExists) { return status; } } - return OkStatus(); + return absl::OkStatus(); } -Status FileSystem::CopyFile(const string& src, const string& target, - TransactionToken* token) { +absl::Status FileSystem::CopyFile(const string& src, const string& target, + TransactionToken* token) { return FileSystemCopyFile(this, src, this, target); } diff --git a/third_party/xla/third_party/tsl/tsl/platform/file_system.h b/third_party/xla/third_party/tsl/tsl/platform/file_system.h index 8f7bd875e35bc3..a25a4760357ca6 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/file_system.h +++ b/third_party/xla/third_party/tsl/tsl/platform/file_system.h @@ -69,17 +69,17 @@ class FileSystem { /// /// The ownership of the returned RandomAccessFile is passed to the caller /// and the object should be deleted when is not used. 
- virtual tsl::Status NewRandomAccessFile( + virtual absl::Status NewRandomAccessFile( const std::string& fname, std::unique_ptr* result) { return NewRandomAccessFile(fname, nullptr, result); } - virtual tsl::Status NewRandomAccessFile( + virtual absl::Status NewRandomAccessFile( const std::string& fname, TransactionToken* token, std::unique_ptr* result) { // We duplicate these methods due to Google internal coding style prevents // virtual functions with default arguments. See PR #41615. - return OkStatus(); + return absl::OkStatus(); } /// \brief Creates an object that writes to a new file with the specified @@ -94,15 +94,15 @@ class FileSystem { /// /// The ownership of the returned WritableFile is passed to the caller /// and the object should be deleted when is not used. - virtual tsl::Status NewWritableFile(const std::string& fname, - std::unique_ptr* result) { + virtual absl::Status NewWritableFile(const std::string& fname, + std::unique_ptr* result) { return NewWritableFile(fname, nullptr, result); } - virtual tsl::Status NewWritableFile(const std::string& fname, - TransactionToken* token, - std::unique_ptr* result) { - return OkStatus(); + virtual absl::Status NewWritableFile(const std::string& fname, + TransactionToken* token, + std::unique_ptr* result) { + return absl::OkStatus(); } /// \brief Creates an object that either appends to an existing file, or @@ -116,15 +116,15 @@ class FileSystem { /// /// The ownership of the returned WritableFile is passed to the caller /// and the object should be deleted when is not used. - virtual tsl::Status NewAppendableFile(const std::string& fname, - std::unique_ptr* result) { + virtual absl::Status NewAppendableFile( + const std::string& fname, std::unique_ptr* result) { return NewAppendableFile(fname, nullptr, result); } - virtual tsl::Status NewAppendableFile(const std::string& fname, - TransactionToken* token, - std::unique_ptr* result) { - return OkStatus(); + virtual absl::Status NewAppendableFile( + const std::string& fname, TransactionToken* token, + std::unique_ptr* result) { + return absl::OkStatus(); } /// \brief Creates a readonly region of memory with the file context. @@ -137,50 +137,51 @@ class FileSystem { /// /// The ownership of the returned ReadOnlyMemoryRegion is passed to the caller /// and the object should be deleted when is not used. - virtual tsl::Status NewReadOnlyMemoryRegionFromFile( + virtual absl::Status NewReadOnlyMemoryRegionFromFile( const std::string& fname, std::unique_ptr* result) { return NewReadOnlyMemoryRegionFromFile(fname, nullptr, result); } - virtual tsl::Status NewReadOnlyMemoryRegionFromFile( + virtual absl::Status NewReadOnlyMemoryRegionFromFile( const std::string& fname, TransactionToken* token, std::unique_ptr* result) { - return OkStatus(); + return absl::OkStatus(); } /// Returns OK if the named path exists and NOT_FOUND otherwise. - virtual tsl::Status FileExists(const std::string& fname) { + virtual absl::Status FileExists(const std::string& fname) { return FileExists(fname, nullptr); } - virtual tsl::Status FileExists(const std::string& fname, - TransactionToken* token) { - return OkStatus(); + virtual absl::Status FileExists(const std::string& fname, + TransactionToken* token) { + return absl::OkStatus(); } /// Returns true if all the listed files exist, false otherwise. /// if status is not null, populate the vector with a detailed status /// for each file. 
virtual bool FilesExist(const std::vector& files, - std::vector* status) { + std::vector* status) { return FilesExist(files, nullptr, status); } virtual bool FilesExist(const std::vector& files, - TransactionToken* token, std::vector* status); + TransactionToken* token, + std::vector* status); /// \brief Returns the immediate children in the given directory. /// /// The returned paths are relative to 'dir'. - virtual tsl::Status GetChildren(const std::string& dir, - std::vector* result) { + virtual absl::Status GetChildren(const std::string& dir, + std::vector* result) { return GetChildren(dir, nullptr, result); } - virtual tsl::Status GetChildren(const std::string& dir, - TransactionToken* token, - std::vector* result) { - return OkStatus(); + virtual absl::Status GetChildren(const std::string& dir, + TransactionToken* token, + std::vector* result) { + return absl::OkStatus(); } /// \brief Given a pattern, stores in *results the set of paths that matches @@ -205,15 +206,15 @@ class FileSystem { /// * OK - no errors /// * UNIMPLEMENTED - Some underlying functions (like GetChildren) are not /// implemented - virtual tsl::Status GetMatchingPaths(const std::string& pattern, - std::vector* results) { + virtual absl::Status GetMatchingPaths(const std::string& pattern, + std::vector* results) { return GetMatchingPaths(pattern, nullptr, results); } - virtual tsl::Status GetMatchingPaths(const std::string& pattern, - TransactionToken* token, - std::vector* results) { - return OkStatus(); + virtual absl::Status GetMatchingPaths(const std::string& pattern, + TransactionToken* token, + std::vector* results) { + return absl::OkStatus(); } /// \brief Checks if the given filename matches the pattern. @@ -224,23 +225,23 @@ class FileSystem { virtual bool Match(const std::string& filename, const std::string& pattern); /// \brief Obtains statistics for the given path. - virtual tsl::Status Stat(const std::string& fname, FileStatistics* stat) { + virtual absl::Status Stat(const std::string& fname, FileStatistics* stat) { return Stat(fname, nullptr, stat); } - virtual tsl::Status Stat(const std::string& fname, TransactionToken* token, - FileStatistics* stat) { - return OkStatus(); + virtual absl::Status Stat(const std::string& fname, TransactionToken* token, + FileStatistics* stat) { + return absl::OkStatus(); } /// \brief Deletes the named file. - virtual tsl::Status DeleteFile(const std::string& fname) { + virtual absl::Status DeleteFile(const std::string& fname) { return DeleteFile(fname, nullptr); } - virtual tsl::Status DeleteFile(const std::string& fname, - TransactionToken* token) { - return OkStatus(); + virtual absl::Status DeleteFile(const std::string& fname, + TransactionToken* token) { + return absl::OkStatus(); } /// \brief Creates the specified directory. @@ -248,13 +249,13 @@ class FileSystem { /// * OK - successfully created the directory. /// * ALREADY_EXISTS - directory with name dirname already exists. /// * PERMISSION_DENIED - dirname is not writable. 
- virtual tsl::Status CreateDir(const std::string& dirname) { + virtual absl::Status CreateDir(const std::string& dirname) { return CreateDir(dirname, nullptr); } - virtual tsl::Status CreateDir(const std::string& dirname, - TransactionToken* token) { - return OkStatus(); + virtual absl::Status CreateDir(const std::string& dirname, + TransactionToken* token) { + return absl::OkStatus(); } /// \brief Creates the specified directory and all the necessary @@ -263,21 +264,21 @@ class FileSystem { /// * OK - successfully created the directory and sub directories, even if /// they were already created. /// * PERMISSION_DENIED - dirname or some subdirectory is not writable. - virtual tsl::Status RecursivelyCreateDir(const std::string& dirname) { + virtual absl::Status RecursivelyCreateDir(const std::string& dirname) { return RecursivelyCreateDir(dirname, nullptr); } - virtual tsl::Status RecursivelyCreateDir(const std::string& dirname, - TransactionToken* token); + virtual absl::Status RecursivelyCreateDir(const std::string& dirname, + TransactionToken* token); /// \brief Deletes the specified directory. - virtual tsl::Status DeleteDir(const std::string& dirname) { + virtual absl::Status DeleteDir(const std::string& dirname) { return DeleteDir(dirname, nullptr); } - virtual tsl::Status DeleteDir(const std::string& dirname, - TransactionToken* token) { - return OkStatus(); + virtual absl::Status DeleteDir(const std::string& dirname, + TransactionToken* token) { + return absl::OkStatus(); } /// \brief Deletes the specified directory and all subdirectories and files @@ -304,48 +305,49 @@ class FileSystem { /// * PERMISSION_DENIED - dirname or some descendant is not writable /// * UNIMPLEMENTED - Some underlying functions (like Delete) are not /// implemented - virtual tsl::Status DeleteRecursively(const std::string& dirname, - int64_t* undeleted_files, - int64_t* undeleted_dirs) { + virtual absl::Status DeleteRecursively(const std::string& dirname, + int64_t* undeleted_files, + int64_t* undeleted_dirs) { return DeleteRecursively(dirname, nullptr, undeleted_files, undeleted_dirs); } - virtual tsl::Status DeleteRecursively(const std::string& dirname, - TransactionToken* token, - int64_t* undeleted_files, - int64_t* undeleted_dirs); + virtual absl::Status DeleteRecursively(const std::string& dirname, + TransactionToken* token, + int64_t* undeleted_files, + int64_t* undeleted_dirs); /// \brief Stores the size of `fname` in `*file_size`. - virtual tsl::Status GetFileSize(const std::string& fname, uint64* file_size) { + virtual absl::Status GetFileSize(const std::string& fname, + uint64* file_size) { return GetFileSize(fname, nullptr, file_size); } - virtual tsl::Status GetFileSize(const std::string& fname, - TransactionToken* token, uint64* file_size) { - return OkStatus(); + virtual absl::Status GetFileSize(const std::string& fname, + TransactionToken* token, uint64* file_size) { + return absl::OkStatus(); } /// \brief Overwrites the target if it exists. - virtual tsl::Status RenameFile(const std::string& src, - const std::string& target) { + virtual absl::Status RenameFile(const std::string& src, + const std::string& target) { return RenameFile(src, target, nullptr); } - virtual tsl::Status RenameFile(const std::string& src, - const std::string& target, - TransactionToken* token) { - return OkStatus(); + virtual absl::Status RenameFile(const std::string& src, + const std::string& target, + TransactionToken* token) { + return absl::OkStatus(); } /// \brief Copy the src to target. 
- virtual tsl::Status CopyFile(const std::string& src, - const std::string& target) { + virtual absl::Status CopyFile(const std::string& src, + const std::string& target) { return CopyFile(src, target, nullptr); } - virtual tsl::Status CopyFile(const std::string& src, - const std::string& target, - TransactionToken* token); + virtual absl::Status CopyFile(const std::string& src, + const std::string& target, + TransactionToken* token); /// \brief Translate an URI to a filename for the FileSystem implementation. /// @@ -365,12 +367,12 @@ class FileSystem { /// * NOT_FOUND - The path entry does not exist. /// * PERMISSION_DENIED - Insufficient permissions. /// * UNIMPLEMENTED - The file factory doesn't support directories. - virtual tsl::Status IsDirectory(const std::string& fname) { + virtual absl::Status IsDirectory(const std::string& fname) { return IsDirectory(fname, nullptr); } - virtual tsl::Status IsDirectory(const std::string& fname, - TransactionToken* token); + virtual absl::Status IsDirectory(const std::string& fname, + TransactionToken* token); /// \brief Returns whether the given path is on a file system /// that has atomic move capabilities. This can be used @@ -382,7 +384,8 @@ class FileSystem { /// so has_atomic_move holds the above information. /// * UNIMPLEMENTED - The file system of the path hasn't been implemented in /// TF - virtual Status HasAtomicMove(const std::string& path, bool* has_atomic_move); + virtual absl::Status HasAtomicMove(const std::string& path, + bool* has_atomic_move); /// \brief Flushes any cached filesystem objects from memory. virtual void FlushCaches() { FlushCaches(nullptr); } @@ -476,67 +479,67 @@ class FileSystem { // Transaction related API /// \brief Starts a new transaction - virtual tsl::Status StartTransaction(TransactionToken** token) { + virtual absl::Status StartTransaction(TransactionToken** token) { *token = nullptr; - return OkStatus(); + return absl::OkStatus(); } /// \brief Adds `path` to transaction in `token` - virtual tsl::Status AddToTransaction(const std::string& path, - TransactionToken* token) { - return OkStatus(); + virtual absl::Status AddToTransaction(const std::string& path, + TransactionToken* token) { + return absl::OkStatus(); } /// \brief Ends transaction - virtual tsl::Status EndTransaction(TransactionToken* token) { - return OkStatus(); + virtual absl::Status EndTransaction(TransactionToken* token) { + return absl::OkStatus(); } /// \brief Get token for `path` or start a new transaction and add `path` to /// it. - virtual tsl::Status GetTokenOrStartTransaction(const std::string& path, - TransactionToken** token) { + virtual absl::Status GetTokenOrStartTransaction(const std::string& path, + TransactionToken** token) { *token = nullptr; - return OkStatus(); + return absl::OkStatus(); } /// \brief Return transaction for `path` or nullptr in `token` - virtual tsl::Status GetTransactionForPath(const std::string& path, - TransactionToken** token) { + virtual absl::Status GetTransactionForPath(const std::string& path, + TransactionToken** token) { *token = nullptr; - return OkStatus(); + return absl::OkStatus(); } /// \brief Decode transaction to human readable string. 
virtual std::string DecodeTransaction(const TransactionToken* token); /// \brief Set File System Configuration Options - virtual Status SetOption(const string& key, const string& value) { + virtual absl::Status SetOption(const string& key, const string& value) { return errors::Unimplemented("SetOption"); } /// \brief Set File System Configuration Option - virtual tsl::Status SetOption(const std::string& name, - const std::vector& values) { + virtual absl::Status SetOption(const std::string& name, + const std::vector& values) { return errors::Unimplemented("SetOption"); } /// \brief Set File System Configuration Option - virtual tsl::Status SetOption(const std::string& name, - const std::vector& values) { + virtual absl::Status SetOption(const std::string& name, + const std::vector& values) { return errors::Unimplemented("SetOption"); } /// \brief Set File System Configuration Option - virtual tsl::Status SetOption(const std::string& name, - const std::vector& values) { + virtual absl::Status SetOption(const std::string& name, + const std::vector& values) { return errors::Unimplemented("SetOption"); } /// \brief Set File System ACL checker. /// /// No checks are enforced if a FileAcl is never set. - virtual tsl::Status SetFileAcl(std::shared_ptr file_acl) { + virtual absl::Status SetFileAcl(std::shared_ptr file_acl) { return errors::Unimplemented("SetFileAcl"); } @@ -581,48 +584,49 @@ class WrappedFileSystem : public FileSystem { public: TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT; - tsl::Status NewRandomAccessFile( + absl::Status NewRandomAccessFile( const std::string& fname, TransactionToken* token, std::unique_ptr* result) override { return fs_->NewRandomAccessFile(fname, (token ? token : token_), result); } - tsl::Status NewWritableFile(const std::string& fname, TransactionToken* token, - std::unique_ptr* result) override { + absl::Status NewWritableFile(const std::string& fname, + TransactionToken* token, + std::unique_ptr* result) override { return fs_->NewWritableFile(fname, (token ? token : token_), result); } - tsl::Status NewAppendableFile( + absl::Status NewAppendableFile( const std::string& fname, TransactionToken* token, std::unique_ptr* result) override { return fs_->NewAppendableFile(fname, (token ? token : token_), result); } - tsl::Status NewReadOnlyMemoryRegionFromFile( + absl::Status NewReadOnlyMemoryRegionFromFile( const std::string& fname, TransactionToken* token, std::unique_ptr* result) override { return fs_->NewReadOnlyMemoryRegionFromFile(fname, (token ? token : token_), result); } - tsl::Status FileExists(const std::string& fname, - TransactionToken* token) override { + absl::Status FileExists(const std::string& fname, + TransactionToken* token) override { return fs_->FileExists(fname, (token ? token : token_)); } bool FilesExist(const std::vector& files, TransactionToken* token, - std::vector* status) override { + std::vector* status) override { return fs_->FilesExist(files, (token ? token : token_), status); } - tsl::Status GetChildren(const std::string& dir, TransactionToken* token, - std::vector* result) override { + absl::Status GetChildren(const std::string& dir, TransactionToken* token, + std::vector* result) override { return fs_->GetChildren(dir, (token ? 
token : token_), result); } - tsl::Status GetMatchingPaths(const std::string& pattern, - TransactionToken* token, - std::vector* results) override { + absl::Status GetMatchingPaths(const std::string& pattern, + TransactionToken* token, + std::vector* results) override { return fs_->GetMatchingPaths(pattern, (token ? token : token_), results); } @@ -630,51 +634,51 @@ class WrappedFileSystem : public FileSystem { return fs_->Match(filename, pattern); } - tsl::Status Stat(const std::string& fname, TransactionToken* token, - FileStatistics* stat) override { + absl::Status Stat(const std::string& fname, TransactionToken* token, + FileStatistics* stat) override { return fs_->Stat(fname, (token ? token : token_), stat); } - tsl::Status DeleteFile(const std::string& fname, - TransactionToken* token) override { + absl::Status DeleteFile(const std::string& fname, + TransactionToken* token) override { return fs_->DeleteFile(fname, (token ? token : token_)); } - tsl::Status CreateDir(const std::string& dirname, - TransactionToken* token) override { + absl::Status CreateDir(const std::string& dirname, + TransactionToken* token) override { return fs_->CreateDir(dirname, (token ? token : token_)); } - tsl::Status RecursivelyCreateDir(const std::string& dirname, - TransactionToken* token) override { + absl::Status RecursivelyCreateDir(const std::string& dirname, + TransactionToken* token) override { return fs_->RecursivelyCreateDir(dirname, (token ? token : token_)); } - tsl::Status DeleteDir(const std::string& dirname, - TransactionToken* token) override { + absl::Status DeleteDir(const std::string& dirname, + TransactionToken* token) override { return fs_->DeleteDir(dirname, (token ? token : token_)); } - tsl::Status DeleteRecursively(const std::string& dirname, - TransactionToken* token, - int64_t* undeleted_files, - int64_t* undeleted_dirs) override { + absl::Status DeleteRecursively(const std::string& dirname, + TransactionToken* token, + int64_t* undeleted_files, + int64_t* undeleted_dirs) override { return fs_->DeleteRecursively(dirname, (token ? token : token_), undeleted_files, undeleted_dirs); } - tsl::Status GetFileSize(const std::string& fname, TransactionToken* token, - uint64* file_size) override { + absl::Status GetFileSize(const std::string& fname, TransactionToken* token, + uint64* file_size) override { return fs_->GetFileSize(fname, (token ? token : token_), file_size); } - tsl::Status RenameFile(const std::string& src, const std::string& target, - TransactionToken* token) override { + absl::Status RenameFile(const std::string& src, const std::string& target, + TransactionToken* token) override { return fs_->RenameFile(src, target, (token ? token : token_)); } - tsl::Status CopyFile(const std::string& src, const std::string& target, - TransactionToken* token) override { + absl::Status CopyFile(const std::string& src, const std::string& target, + TransactionToken* token) override { return fs_->CopyFile(src, target, (token ? token : token_)); } @@ -682,13 +686,13 @@ class WrappedFileSystem : public FileSystem { return fs_->TranslateName(name); } - tsl::Status IsDirectory(const std::string& fname, - TransactionToken* token) override { + absl::Status IsDirectory(const std::string& fname, + TransactionToken* token) override { return fs_->IsDirectory(fname, (token ? 
token : token_)); } - Status HasAtomicMove(const std::string& path, - bool* has_atomic_move) override { + absl::Status HasAtomicMove(const std::string& path, + bool* has_atomic_move) override { return fs_->HasAtomicMove(path, has_atomic_move); } @@ -702,26 +706,26 @@ class WrappedFileSystem : public FileSystem { return fs_->Basename(path); } - tsl::Status StartTransaction(TransactionToken** token) override { + absl::Status StartTransaction(TransactionToken** token) override { return fs_->StartTransaction(token); } - tsl::Status AddToTransaction(const std::string& path, - TransactionToken* token) override { + absl::Status AddToTransaction(const std::string& path, + TransactionToken* token) override { return fs_->AddToTransaction(path, (token ? token : token_)); } - tsl::Status EndTransaction(TransactionToken* token) override { + absl::Status EndTransaction(TransactionToken* token) override { return fs_->EndTransaction(token); } - tsl::Status GetTransactionForPath(const std::string& path, - TransactionToken** token) override { + absl::Status GetTransactionForPath(const std::string& path, + TransactionToken** token) override { return fs_->GetTransactionForPath(path, token); } - tsl::Status GetTokenOrStartTransaction(const std::string& path, - TransactionToken** token) override { + absl::Status GetTokenOrStartTransaction(const std::string& path, + TransactionToken** token) override { return fs_->GetTokenOrStartTransaction(path, token); } @@ -749,7 +753,7 @@ class RandomAccessFile { /// /// This is an optional operation that may not be implemented by every /// filesystem. - virtual tsl::Status Name(StringPiece* result) const { + virtual absl::Status Name(StringPiece* result) const { return errors::Unimplemented("This filesystem does not support Name()"); } @@ -768,12 +772,12 @@ class RandomAccessFile { /// because of EOF. /// /// Safe for concurrent use by multiple threads. - virtual tsl::Status Read(uint64 offset, size_t n, StringPiece* result, - char* scratch) const = 0; + virtual absl::Status Read(uint64 offset, size_t n, StringPiece* result, + char* scratch) const = 0; #if defined(TF_CORD_SUPPORT) /// \brief Read up to `n` bytes from the file starting at `offset`. - virtual tsl::Status Read(uint64 offset, size_t n, absl::Cord* cord) const { + virtual absl::Status Read(uint64 offset, size_t n, absl::Cord* cord) const { return errors::Unimplemented( "Read(uint64, size_t, absl::Cord*) is not " "implemented"); @@ -795,15 +799,15 @@ class WritableFile { virtual ~WritableFile() = default; /// \brief Append 'data' to the file. - virtual tsl::Status Append(StringPiece data) = 0; + virtual absl::Status Append(StringPiece data) = 0; #if defined(TF_CORD_SUPPORT) // \brief Append 'data' to the file. - virtual tsl::Status Append(const absl::Cord& cord) { + virtual absl::Status Append(const absl::Cord& cord) { for (StringPiece chunk : cord.Chunks()) { TF_RETURN_IF_ERROR(Append(chunk)); } - return OkStatus(); + return absl::OkStatus(); } #endif @@ -814,7 +818,7 @@ class WritableFile { /// Typical return codes (not guaranteed to be exhaustive): /// * OK /// * Other codes, as returned from Flush() - virtual tsl::Status Close() = 0; + virtual absl::Status Close() = 0; /// \brief Flushes the file and optionally syncs contents to filesystem. /// @@ -826,13 +830,13 @@ class WritableFile { /// eventually flush the contents. If the OS or machine crashes /// after a successful flush, the contents may or may not be /// persisted, depending on the implementation. 
- virtual tsl::Status Flush() = 0; + virtual absl::Status Flush() = 0; // \brief Returns the name of the file. /// /// This is an optional operation that may not be implemented by every /// filesystem. - virtual tsl::Status Name(StringPiece* result) const { + virtual absl::Status Name(StringPiece* result) const { return errors::Unimplemented("This filesystem does not support Name()"); } @@ -842,14 +846,14 @@ class WritableFile { /// of the file have been persisted to the filesystem; if the OS /// or machine crashes after a successful Sync, the contents should /// be properly saved. - virtual tsl::Status Sync() = 0; + virtual absl::Status Sync() = 0; /// \brief Retrieves the current write position in the file, or -1 on /// error. /// /// This is an optional operation, subclasses may choose to return /// errors::Unimplemented. - virtual tsl::Status Tell(int64_t* position) { + virtual absl::Status Tell(int64_t* position) { *position = -1; return errors::Unimplemented("This filesystem does not support Tell()"); } @@ -903,11 +907,11 @@ class FileSystemRegistry { typedef std::function Factory; virtual ~FileSystemRegistry() = default; - virtual tsl::Status Register(const std::string& scheme, Factory factory) = 0; - virtual tsl::Status Register(const std::string& scheme, - std::unique_ptr filesystem) = 0; + virtual absl::Status Register(const std::string& scheme, Factory factory) = 0; + virtual absl::Status Register(const std::string& scheme, + std::unique_ptr filesystem) = 0; virtual FileSystem* Lookup(const std::string& scheme) = 0; - virtual tsl::Status GetRegisteredFileSystemSchemes( + virtual absl::Status GetRegisteredFileSystemSchemes( std::vector* schemes) = 0; }; diff --git a/third_party/xla/third_party/tsl/tsl/platform/file_system_helper.cc b/third_party/xla/third_party/tsl/tsl/platform/file_system_helper.cc index bc132ba02a56b5..04dc6a0420516c 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/file_system_helper.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/file_system_helper.cc @@ -121,18 +121,19 @@ static inline int GetFirstGlobbingEntry(const std::vector& dirs) { } // namespace -Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, - std::vector* results) { +absl::Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, + std::vector* results) { // Check that `fs`, `env` and `results` are non-null. if (fs == nullptr || env == nullptr || results == nullptr) { - return Status(absl::StatusCode::kInvalidArgument, - "Filesystem calls GetMatchingPaths with nullptr arguments"); + return absl::Status( + absl::StatusCode::kInvalidArgument, + "Filesystem calls GetMatchingPaths with nullptr arguments"); } // By design, we don't match anything on empty pattern results->clear(); if (pattern.empty()) { - return OkStatus(); + return absl::OkStatus(); } // The pattern can contain globbing characters at multiple levels, e.g.: @@ -155,7 +156,7 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, if (fs->FileExists(pattern).ok()) { results->emplace_back(pattern); } - return OkStatus(); + return absl::OkStatus(); } // To expand the globbing, we do a BFS from `dirs[matching_index-1]`. @@ -205,7 +206,7 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, // Get all children of `parent`. If this fails, return early. 
std::vector children; - Status s = fs->GetChildren(parent, &children); + absl::Status s = fs->GetChildren(parent, &children); if (s.code() == absl::StatusCode::kPermissionDenied) { return; } @@ -220,13 +221,13 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, // We also check that children match the pattern in parallel, for speedup. // We store the status of the match and `IsDirectory` in // `children_status` array, one element for each children. - std::vector children_status(children.size()); + std::vector children_status(children.size()); auto handle_children = [&fs, &match_pattern, &parent, &children, &children_status](int j) { const std::string path = io::JoinPath(parent, children[j]); if (!fs->Match(path, match_pattern)) { - children_status[j] = - Status(absl::StatusCode::kCancelled, "Operation not needed"); + children_status[j] = absl::Status(absl::StatusCode::kCancelled, + "Operation not needed"); } else { children_status[j] = fs->IsDirectory(path); } @@ -263,11 +264,11 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, std::swap(expand_queue, next_expand_queue); } - return OkStatus(); + return absl::OkStatus(); } -StatusOr FileExists(Env* env, const string& fname) { - Status status = env->FileExists(fname); +absl::StatusOr FileExists(Env* env, const string& fname) { + absl::Status status = env->FileExists(fname); if (errors::IsNotFound(status)) { return false; } diff --git a/third_party/xla/third_party/tsl/tsl/platform/file_system_helper.h b/third_party/xla/third_party/tsl/tsl/platform/file_system_helper.h index 5cc2cdc5a1e898..e9e7df6aa68907 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/file_system_helper.h +++ b/third_party/xla/third_party/tsl/tsl/platform/file_system_helper.h @@ -44,8 +44,8 @@ namespace internal { // results: will be cleared and may not be null. // // Returns an error status if any call to 'fs' failed. -Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, - std::vector* results); +absl::Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, + std::vector* results); // Given a file path, determines whether the file exists. This helper simplifies // the use of Env::FileExists. @@ -56,7 +56,7 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, // // Returns true if the file exists, false if it does not exist, or an error // Status. 
-StatusOr FileExists(Env* env, const string& fname); +absl::StatusOr FileExists(Env* env, const string& fname); } // namespace internal } // namespace tsl diff --git a/third_party/xla/third_party/tsl/tsl/platform/null_file_system.h b/third_party/xla/third_party/tsl/tsl/platform/null_file_system.h index 77b8142ee357b7..c04d2c1f0d6056 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/null_file_system.h +++ b/third_party/xla/third_party/tsl/tsl/platform/null_file_system.h @@ -38,67 +38,72 @@ class NullFileSystem : public FileSystem { TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT; - Status NewRandomAccessFile( + absl::Status NewRandomAccessFile( const string& fname, TransactionToken* token, std::unique_ptr* result) override { return errors::Unimplemented("NewRandomAccessFile unimplemented"); } - Status NewWritableFile(const string& fname, TransactionToken* token, - std::unique_ptr* result) override { + absl::Status NewWritableFile(const string& fname, TransactionToken* token, + std::unique_ptr* result) override { return errors::Unimplemented("NewWritableFile unimplemented"); } - Status NewAppendableFile(const string& fname, TransactionToken* token, - std::unique_ptr* result) override { + absl::Status NewAppendableFile( + const string& fname, TransactionToken* token, + std::unique_ptr* result) override { return errors::Unimplemented("NewAppendableFile unimplemented"); } - Status NewReadOnlyMemoryRegionFromFile( + absl::Status NewReadOnlyMemoryRegionFromFile( const string& fname, TransactionToken* token, std::unique_ptr* result) override { return errors::Unimplemented( "NewReadOnlyMemoryRegionFromFile unimplemented"); } - Status FileExists(const string& fname, TransactionToken* token) override { + absl::Status FileExists(const string& fname, + TransactionToken* token) override { return errors::Unimplemented("FileExists unimplemented"); } - Status GetChildren(const string& dir, TransactionToken* token, - std::vector* result) override { + absl::Status GetChildren(const string& dir, TransactionToken* token, + std::vector* result) override { return errors::Unimplemented("GetChildren unimplemented"); } - Status GetMatchingPaths(const string& pattern, TransactionToken* token, - std::vector* results) override { + absl::Status GetMatchingPaths(const string& pattern, TransactionToken* token, + std::vector* results) override { return internal::GetMatchingPaths(this, Env::Default(), pattern, results); } - Status DeleteFile(const string& fname, TransactionToken* token) override { + absl::Status DeleteFile(const string& fname, + TransactionToken* token) override { return errors::Unimplemented("DeleteFile unimplemented"); } - Status CreateDir(const string& dirname, TransactionToken* token) override { + absl::Status CreateDir(const string& dirname, + TransactionToken* token) override { return errors::Unimplemented("CreateDir unimplemented"); } - Status DeleteDir(const string& dirname, TransactionToken* token) override { + absl::Status DeleteDir(const string& dirname, + TransactionToken* token) override { return errors::Unimplemented("DeleteDir unimplemented"); } - Status GetFileSize(const string& fname, TransactionToken* token, - uint64* file_size) override { + absl::Status GetFileSize(const string& fname, TransactionToken* token, + uint64* file_size) override { return errors::Unimplemented("GetFileSize unimplemented"); } - Status RenameFile(const string& src, const string& target, - TransactionToken* token) override { + absl::Status RenameFile(const string& src, const string& target, + 
TransactionToken* token) override { return errors::Unimplemented("RenameFile unimplemented"); } - Status Stat(const string& fname, TransactionToken* token, - FileStatistics* stat) override { + absl::Status Stat(const string& fname, TransactionToken* token, + FileStatistics* stat) override { return errors::Unimplemented("Stat unimplemented"); } }; diff --git a/third_party/xla/third_party/tsl/tsl/platform/ram_file_system.h b/third_party/xla/third_party/tsl/tsl/platform/ram_file_system.h index 1b51653b716c3e..245eacfe465daa 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/ram_file_system.h +++ b/third_party/xla/third_party/tsl/tsl/platform/ram_file_system.h @@ -49,13 +49,13 @@ class RamRandomAccessFile : public RandomAccessFile, public WritableFile { : name_(name), data_(cord) {} ~RamRandomAccessFile() override {} - Status Name(StringPiece* result) const override { + absl::Status Name(StringPiece* result) const override { *result = name_; - return OkStatus(); + return absl::OkStatus(); } - Status Read(uint64 offset, size_t n, StringPiece* result, - char* scratch) const override { + absl::Status Read(uint64 offset, size_t n, StringPiece* result, + char* scratch) const override { if (offset >= data_->size()) { return errors::OutOfRange(""); } @@ -72,26 +72,26 @@ class RamRandomAccessFile : public RandomAccessFile, public WritableFile { if (left < n) { return errors::OutOfRange(""); } - return OkStatus(); + return absl::OkStatus(); } - Status Append(StringPiece data) override { + absl::Status Append(StringPiece data) override { data_->append(data.data(), data.size()); - return OkStatus(); + return absl::OkStatus(); } #if defined(TF_CORD_SUPPORT) - Status Append(const absl::Cord& cord) override { + absl::Status Append(const absl::Cord& cord) override { data_->append(cord.char_begin(), cord.char_end()); - return OkStatus(); + return absl::OkStatus(); } #endif - Status Close() override { return OkStatus(); } - Status Flush() override { return OkStatus(); } - Status Sync() override { return OkStatus(); } + absl::Status Close() override { return absl::OkStatus(); } + absl::Status Flush() override { return absl::OkStatus(); } + absl::Status Sync() override { return absl::OkStatus(); } - Status Tell(int64_t* position) override { + absl::Status Tell(int64_t* position) override { *position = -1; return errors::Unimplemented("This filesystem does not support Tell()"); } @@ -107,7 +107,7 @@ class RamFileSystem : public FileSystem { public: TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT; - Status NewRandomAccessFile( + absl::Status NewRandomAccessFile( const std::string& fname_, TransactionToken* token, std::unique_ptr* result) override { mutex_lock m(mu_); @@ -121,11 +121,12 @@ class RamFileSystem : public FileSystem { } *result = std::unique_ptr( new RamRandomAccessFile(fname, fs_[fname])); - return OkStatus(); + return absl::OkStatus(); } - Status NewWritableFile(const std::string& fname_, TransactionToken* token, - std::unique_ptr* result) override { + absl::Status NewWritableFile(const std::string& fname_, + TransactionToken* token, + std::unique_ptr* result) override { mutex_lock m(mu_); auto fname = StripRamFsPrefix(fname_); @@ -137,11 +138,12 @@ class RamFileSystem : public FileSystem { } *result = std::unique_ptr( new RamRandomAccessFile(fname, fs_[fname])); - return OkStatus(); + return absl::OkStatus(); } - Status NewAppendableFile(const std::string& fname_, TransactionToken* token, - std::unique_ptr* result) override { + absl::Status NewAppendableFile( + const std::string& 
fname_, TransactionToken* token, + std::unique_ptr* result) override { mutex_lock m(mu_); auto fname = StripRamFsPrefix(fname_); @@ -153,25 +155,25 @@ class RamFileSystem : public FileSystem { } *result = std::unique_ptr( new RamRandomAccessFile(fname, fs_[fname])); - return OkStatus(); + return absl::OkStatus(); } - Status NewReadOnlyMemoryRegionFromFile( + absl::Status NewReadOnlyMemoryRegionFromFile( const std::string& fname, TransactionToken* token, std::unique_ptr* result) override { return errors::Unimplemented(""); } - Status FileExists(const std::string& fname_, - TransactionToken* token) override { + absl::Status FileExists(const std::string& fname_, + TransactionToken* token) override { FileStatistics stat; auto fname = StripRamFsPrefix(fname_); return Stat(fname, token, &stat); } - Status GetChildren(const std::string& dir_, TransactionToken* token, - std::vector* result) override { + absl::Status GetChildren(const std::string& dir_, TransactionToken* token, + std::vector* result) override { mutex_lock m(mu_); auto dir = StripRamFsPrefix(dir_); @@ -185,11 +187,12 @@ class RamFileSystem : public FileSystem { ++it; } - return OkStatus(); + return absl::OkStatus(); } - Status GetMatchingPaths(const std::string& pattern_, TransactionToken* token, - std::vector* results) override { + absl::Status GetMatchingPaths(const std::string& pattern_, + TransactionToken* token, + std::vector* results) override { mutex_lock m(mu_); auto pattern = StripRamFsPrefix(pattern_); @@ -199,11 +202,11 @@ class RamFileSystem : public FileSystem { results->push_back("ram://" + it->first); } } - return OkStatus(); + return absl::OkStatus(); } - Status Stat(const std::string& fname_, TransactionToken* token, - FileStatistics* stat) override { + absl::Status Stat(const std::string& fname_, TransactionToken* token, + FileStatistics* stat) override { mutex_lock m(mu_); auto fname = StripRamFsPrefix(fname_); @@ -216,30 +219,30 @@ class RamFileSystem : public FileSystem { stat->is_directory = false; stat->length = fs_[fname]->size(); stat->mtime_nsec = 0; - return OkStatus(); + return absl::OkStatus(); } stat->is_directory = true; stat->length = 0; stat->mtime_nsec = 0; - return OkStatus(); + return absl::OkStatus(); } - Status DeleteFile(const std::string& fname_, - TransactionToken* token) override { + absl::Status DeleteFile(const std::string& fname_, + TransactionToken* token) override { mutex_lock m(mu_); auto fname = StripRamFsPrefix(fname_); if (fs_.find(fname) != fs_.end()) { fs_.erase(fname); - return OkStatus(); + return absl::OkStatus(); } return errors::NotFound(""); } - Status CreateDir(const std::string& dirname_, - TransactionToken* token) override { + absl::Status CreateDir(const std::string& dirname_, + TransactionToken* token) override { mutex_lock m(mu_); auto dirname = StripRamFsPrefix(dirname_); @@ -250,15 +253,15 @@ class RamFileSystem : public FileSystem { } fs_[dirname] = nullptr; - return OkStatus(); + return absl::OkStatus(); } - Status RecursivelyCreateDir(const std::string& dirname_, - TransactionToken* token) override { + absl::Status RecursivelyCreateDir(const std::string& dirname_, + TransactionToken* token) override { auto dirname = StripRamFsPrefix(dirname_); std::vector dirs = StrSplit(dirname, "/"); - Status last_status; + absl::Status last_status; std::string dir = dirs[0]; last_status = CreateDir(dir, token); @@ -269,8 +272,8 @@ class RamFileSystem : public FileSystem { return last_status; } - Status DeleteDir(const std::string& dirname_, - TransactionToken* token) override { 
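For context on the RamFileSystem hunks above and below: the class keeps a single map from stripped "ram://" paths to contents, with a null entry standing for a directory. A minimal sketch of that idea, assuming nothing beyond absl::Status (`TinyRamFs` is illustrative, not the TSL class):

```cpp
#include <map>
#include <memory>
#include <string>
#include <utility>

#include "absl/status/status.h"

// One map from path to contents; a null entry marks a directory.
class TinyRamFs {
 public:
  absl::Status CreateDir(const std::string& dir) {
    auto it = fs_.find(dir);
    if (it != fs_.end() && it->second != nullptr) {
      return absl::AlreadyExistsError("a file with that name already exists");
    }
    fs_[dir] = nullptr;  // directory marker
    return absl::OkStatus();
  }

  absl::Status WriteFile(const std::string& fname, std::string data) {
    fs_[fname] = std::make_shared<std::string>(std::move(data));
    return absl::OkStatus();
  }

  absl::Status RenameFile(const std::string& src, const std::string& dst) {
    auto it = fs_.find(src);
    if (it == fs_.end()) return absl::NotFoundError(src);
    fs_[dst] = it->second;  // move the contents under the new key
    fs_.erase(src);
    return absl::OkStatus();
  }

 private:
  std::map<std::string, std::shared_ptr<std::string>> fs_;
};
```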
+ absl::Status DeleteDir(const std::string& dirname_, + TransactionToken* token) override { mutex_lock m(mu_); auto dirname = StripRamFsPrefix(dirname_); @@ -283,11 +286,11 @@ class RamFileSystem : public FileSystem { } fs_.erase(dirname); - return OkStatus(); + return absl::OkStatus(); } - Status GetFileSize(const std::string& fname_, TransactionToken* token, - uint64* file_size) override { + absl::Status GetFileSize(const std::string& fname_, TransactionToken* token, + uint64* file_size) override { mutex_lock m(mu_); auto fname = StripRamFsPrefix(fname_); @@ -296,13 +299,13 @@ class RamFileSystem : public FileSystem { return errors::InvalidArgument("Not a file"); } *file_size = fs_[fname]->size(); - return OkStatus(); + return absl::OkStatus(); } return errors::NotFound(""); } - Status RenameFile(const std::string& src_, const std::string& target_, - TransactionToken* token) override { + absl::Status RenameFile(const std::string& src_, const std::string& target_, + TransactionToken* token) override { mutex_lock m(mu_); auto src = StripRamFsPrefix(src_); auto target = StripRamFsPrefix(target_); @@ -310,7 +313,7 @@ class RamFileSystem : public FileSystem { if (fs_.find(src) != fs_.end()) { fs_[target] = fs_[src]; fs_.erase(fs_.find(src)); - return OkStatus(); + return absl::OkStatus(); } return errors::NotFound(""); } diff --git a/third_party/xla/third_party/tsl/tsl/platform/retrying_file_system.h b/third_party/xla/third_party/tsl/tsl/platform/retrying_file_system.h index 591423b4fe3ec7..3db1665a4f1867 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/retrying_file_system.h +++ b/third_party/xla/third_party/tsl/tsl/platform/retrying_file_system.h @@ -40,21 +40,23 @@ class RetryingFileSystem : public FileSystem { TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT; - Status NewRandomAccessFile( + absl::Status NewRandomAccessFile( const string& filename, TransactionToken* token, std::unique_ptr* result) override; - Status NewWritableFile(const string& filename, TransactionToken* token, - std::unique_ptr* result) override; + absl::Status NewWritableFile(const string& filename, TransactionToken* token, + std::unique_ptr* result) override; - Status NewAppendableFile(const string& filename, TransactionToken* token, - std::unique_ptr* result) override; + absl::Status NewAppendableFile( + const string& filename, TransactionToken* token, + std::unique_ptr* result) override; - Status NewReadOnlyMemoryRegionFromFile( + absl::Status NewReadOnlyMemoryRegionFromFile( const string& filename, TransactionToken* token, std::unique_ptr* result) override; - Status FileExists(const string& fname, TransactionToken* token) override { + absl::Status FileExists(const string& fname, + TransactionToken* token) override { return RetryingUtils::CallWithRetries( [this, &fname, token]() { return base_file_system_->FileExists(fname, token); @@ -62,8 +64,8 @@ class RetryingFileSystem : public FileSystem { retry_config_); } - Status GetChildren(const string& dir, TransactionToken* token, - std::vector* result) override { + absl::Status GetChildren(const string& dir, TransactionToken* token, + std::vector* result) override { return RetryingUtils::CallWithRetries( [this, &dir, result, token]() { return base_file_system_->GetChildren(dir, token, result); @@ -71,8 +73,8 @@ class RetryingFileSystem : public FileSystem { retry_config_); } - Status GetMatchingPaths(const string& pattern, TransactionToken* token, - std::vector* result) override { + absl::Status GetMatchingPaths(const string& pattern, TransactionToken* 
token, + std::vector* result) override { return RetryingUtils::CallWithRetries( [this, &pattern, result, token]() { return base_file_system_->GetMatchingPaths(pattern, token, result); @@ -80,8 +82,8 @@ class RetryingFileSystem : public FileSystem { retry_config_); } - Status Stat(const string& fname, TransactionToken* token, - FileStatistics* stat) override { + absl::Status Stat(const string& fname, TransactionToken* token, + FileStatistics* stat) override { return RetryingUtils::CallWithRetries( [this, &fname, stat, token]() { return base_file_system_->Stat(fname, token, stat); @@ -89,7 +91,8 @@ class RetryingFileSystem : public FileSystem { retry_config_); } - Status DeleteFile(const string& fname, TransactionToken* token) override { + absl::Status DeleteFile(const string& fname, + TransactionToken* token) override { return RetryingUtils::DeleteWithRetries( [this, &fname, token]() { return base_file_system_->DeleteFile(fname, token); @@ -97,7 +100,8 @@ class RetryingFileSystem : public FileSystem { retry_config_); } - Status CreateDir(const string& dirname, TransactionToken* token) override { + absl::Status CreateDir(const string& dirname, + TransactionToken* token) override { return RetryingUtils::CallWithRetries( [this, &dirname, token]() { return base_file_system_->CreateDir(dirname, token); @@ -105,7 +109,8 @@ class RetryingFileSystem : public FileSystem { retry_config_); } - Status DeleteDir(const string& dirname, TransactionToken* token) override { + absl::Status DeleteDir(const string& dirname, + TransactionToken* token) override { return RetryingUtils::DeleteWithRetries( [this, &dirname, token]() { return base_file_system_->DeleteDir(dirname, token); @@ -113,8 +118,8 @@ class RetryingFileSystem : public FileSystem { retry_config_); } - Status GetFileSize(const string& fname, TransactionToken* token, - uint64* file_size) override { + absl::Status GetFileSize(const string& fname, TransactionToken* token, + uint64* file_size) override { return RetryingUtils::CallWithRetries( [this, &fname, file_size, token]() { return base_file_system_->GetFileSize(fname, token, file_size); @@ -122,8 +127,8 @@ class RetryingFileSystem : public FileSystem { retry_config_); } - Status RenameFile(const string& src, const string& target, - TransactionToken* token) override { + absl::Status RenameFile(const string& src, const string& target, + TransactionToken* token) override { return RetryingUtils::CallWithRetries( [this, &src, &target, token]() { return base_file_system_->RenameFile(src, target, token); @@ -131,7 +136,8 @@ class RetryingFileSystem : public FileSystem { retry_config_); } - Status IsDirectory(const string& dirname, TransactionToken* token) override { + absl::Status IsDirectory(const string& dirname, + TransactionToken* token) override { return RetryingUtils::CallWithRetries( [this, &dirname, token]() { return base_file_system_->IsDirectory(dirname, token); @@ -139,14 +145,15 @@ class RetryingFileSystem : public FileSystem { retry_config_); } - Status HasAtomicMove(const string& path, bool* has_atomic_move) override { + absl::Status HasAtomicMove(const string& path, + bool* has_atomic_move) override { // this method does not need to be retried return base_file_system_->HasAtomicMove(path, has_atomic_move); } - Status DeleteRecursively(const string& dirname, TransactionToken* token, - int64_t* undeleted_files, - int64_t* undeleted_dirs) override { + absl::Status DeleteRecursively(const string& dirname, TransactionToken* token, + int64_t* undeleted_files, + int64_t* undeleted_dirs) 
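The RetryingFileSystem methods above all follow one pattern: capture the delegated call in a lambda and hand it to a retry helper. A compact sketch of that delegation, with an inlined stand-in for RetryingUtils::CallWithRetries; the retry-only-on-kUnavailable rule and the attempt count are illustrative, not the TSL policy:

```cpp
#include <functional>
#include <string>
#include <utility>

#include "absl/status/status.h"

template <typename BaseFs>
class RetryingFs {
 public:
  explicit RetryingFs(BaseFs base) : base_(std::move(base)) {}

  // Every public method has the same shape: wrap the delegated call in a
  // lambda so the retry helper can invoke it more than once.
  absl::Status DeleteFile(const std::string& fname) {
    return Retry([this, &fname]() { return base_.DeleteFile(fname); });
  }

 private:
  // Stand-in for RetryingUtils::CallWithRetries: retry transient failures a
  // fixed number of times, return everything else immediately.
  static absl::Status Retry(const std::function<absl::Status()>& op) {
    absl::Status s;
    for (int attempt = 0; attempt < 3; ++attempt) {
      s = op();
      if (s.code() != absl::StatusCode::kUnavailable) break;
    }
    return s;
  }

  BaseFs base_;
};
```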
override { return RetryingUtils::DeleteWithRetries( [this, &dirname, token, undeleted_files, undeleted_dirs]() { return base_file_system_->DeleteRecursively( @@ -177,12 +184,12 @@ class RetryingRandomAccessFile : public RandomAccessFile { const RetryConfig& retry_config) : base_file_(std::move(base_file)), retry_config_(retry_config) {} - Status Name(StringPiece* result) const override { + absl::Status Name(StringPiece* result) const override { return base_file_->Name(result); } - Status Read(uint64 offset, size_t n, StringPiece* result, - char* scratch) const override { + absl::Status Read(uint64 offset, size_t n, StringPiece* result, + char* scratch) const override { return RetryingUtils::CallWithRetries( [this, offset, n, result, scratch]() { return base_file_->Read(offset, n, result, scratch); @@ -206,26 +213,26 @@ class RetryingWritableFile : public WritableFile { Close().IgnoreError(); } - Status Append(StringPiece data) override { + absl::Status Append(StringPiece data) override { return RetryingUtils::CallWithRetries( [this, &data]() { return base_file_->Append(data); }, retry_config_); } - Status Close() override { + absl::Status Close() override { return RetryingUtils::CallWithRetries( [this]() { return base_file_->Close(); }, retry_config_); } - Status Flush() override { + absl::Status Flush() override { return RetryingUtils::CallWithRetries( [this]() { return base_file_->Flush(); }, retry_config_); } - Status Name(StringPiece* result) const override { + absl::Status Name(StringPiece* result) const override { return base_file_->Name(result); } - Status Sync() override { + absl::Status Sync() override { return RetryingUtils::CallWithRetries( [this]() { return base_file_->Sync(); }, retry_config_); } - Status Tell(int64_t* position) override { + absl::Status Tell(int64_t* position) override { return RetryingUtils::CallWithRetries( [this, &position]() { return base_file_->Tell(position); }, retry_config_); @@ -239,7 +246,7 @@ class RetryingWritableFile : public WritableFile { } // namespace retrying_internals template -Status RetryingFileSystem::NewRandomAccessFile( +absl::Status RetryingFileSystem::NewRandomAccessFile( const string& filename, TransactionToken* token, std::unique_ptr* result) { std::unique_ptr base_file; @@ -251,11 +258,11 @@ Status RetryingFileSystem::NewRandomAccessFile( retry_config_)); result->reset(new retrying_internals::RetryingRandomAccessFile( std::move(base_file), retry_config_)); - return OkStatus(); + return absl::OkStatus(); } template -Status RetryingFileSystem::NewWritableFile( +absl::Status RetryingFileSystem::NewWritableFile( const string& filename, TransactionToken* token, std::unique_ptr* result) { std::unique_ptr base_file; @@ -266,11 +273,11 @@ Status RetryingFileSystem::NewWritableFile( retry_config_)); result->reset(new retrying_internals::RetryingWritableFile( std::move(base_file), retry_config_)); - return OkStatus(); + return absl::OkStatus(); } template -Status RetryingFileSystem::NewAppendableFile( +absl::Status RetryingFileSystem::NewAppendableFile( const string& filename, TransactionToken* token, std::unique_ptr* result) { std::unique_ptr base_file; @@ -282,11 +289,11 @@ Status RetryingFileSystem::NewAppendableFile( retry_config_)); result->reset(new retrying_internals::RetryingWritableFile( std::move(base_file), retry_config_)); - return OkStatus(); + return absl::OkStatus(); } template -Status RetryingFileSystem::NewReadOnlyMemoryRegionFromFile( +absl::Status RetryingFileSystem::NewReadOnlyMemoryRegionFromFile( const string& 
filename, TransactionToken* token, std::unique_ptr* result) { return RetryingUtils::CallWithRetries( diff --git a/third_party/xla/third_party/tsl/tsl/platform/retrying_file_system_test.cc b/third_party/xla/third_party/tsl/tsl/platform/retrying_file_system_test.cc index 605ce2eb7ea3c3..522c59f565e0b1 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/retrying_file_system_test.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/retrying_file_system_test.cc @@ -24,7 +24,7 @@ limitations under the License. namespace tsl { namespace { -typedef std::vector> ExpectedCalls; +typedef std::vector> ExpectedCalls; ExpectedCalls CreateRetriableErrors(const string& method, int n) { ExpectedCalls expected_calls; @@ -47,7 +47,7 @@ class MockCallSequence { << "the next expected call: " << std::get<0>(calls_.front()); } - Status ConsumeNextCall(const string& method) { + absl::Status ConsumeNextCall(const string& method) { EXPECT_FALSE(calls_.empty()) << "No more calls were expected."; auto call = calls_.front(); calls_.erase(calls_.begin()); @@ -62,11 +62,11 @@ class MockCallSequence { class MockRandomAccessFile : public RandomAccessFile { public: explicit MockRandomAccessFile(const ExpectedCalls& calls) : calls_(calls) {} - Status Name(StringPiece* result) const override { + absl::Status Name(StringPiece* result) const override { return calls_.ConsumeNextCall("Name"); } - Status Read(uint64 offset, size_t n, StringPiece* result, - char* scratch) const override { + absl::Status Read(uint64 offset, size_t n, StringPiece* result, + char* scratch) const override { return calls_.ConsumeNextCall("Read"); } @@ -77,16 +77,16 @@ class MockRandomAccessFile : public RandomAccessFile { class MockWritableFile : public WritableFile { public: explicit MockWritableFile(const ExpectedCalls& calls) : calls_(calls) {} - Status Append(StringPiece data) override { + absl::Status Append(StringPiece data) override { return calls_.ConsumeNextCall("Append"); } - Status Close() override { return calls_.ConsumeNextCall("Close"); } - Status Flush() override { return calls_.ConsumeNextCall("Flush"); } - Status Name(StringPiece* result) const override { + absl::Status Close() override { return calls_.ConsumeNextCall("Close"); } + absl::Status Flush() override { return calls_.ConsumeNextCall("Flush"); } + absl::Status Name(StringPiece* result) const override { return calls_.ConsumeNextCall("Name"); } - Status Sync() override { return calls_.ConsumeNextCall("Sync"); } - Status Tell(int64_t* position) override { + absl::Status Sync() override { return calls_.ConsumeNextCall("Sync"); } + absl::Status Tell(int64_t* position) override { return calls_.ConsumeNextCall("Tell"); } @@ -101,79 +101,85 @@ class MockFileSystem : public FileSystem { TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT; - Status NewRandomAccessFile( + absl::Status NewRandomAccessFile( const string& fname, TransactionToken* token, std::unique_ptr* result) override { *result = std::move(random_access_file_to_return); return calls_.ConsumeNextCall("NewRandomAccessFile"); } - Status NewWritableFile(const string& fname, TransactionToken* token, - std::unique_ptr* result) override { + absl::Status NewWritableFile(const string& fname, TransactionToken* token, + std::unique_ptr* result) override { *result = std::move(writable_file_to_return); return calls_.ConsumeNextCall("NewWritableFile"); } - Status NewAppendableFile(const string& fname, TransactionToken* token, - std::unique_ptr* result) override { + absl::Status NewAppendableFile( + const string& fname, 
TransactionToken* token, + std::unique_ptr* result) override { *result = std::move(writable_file_to_return); return calls_.ConsumeNextCall("NewAppendableFile"); } - Status NewReadOnlyMemoryRegionFromFile( + absl::Status NewReadOnlyMemoryRegionFromFile( const string& fname, TransactionToken* token, std::unique_ptr* result) override { return calls_.ConsumeNextCall("NewReadOnlyMemoryRegionFromFile"); } - Status FileExists(const string& fname, TransactionToken* token) override { + absl::Status FileExists(const string& fname, + TransactionToken* token) override { return calls_.ConsumeNextCall("FileExists"); } - Status GetChildren(const string& dir, TransactionToken* token, - std::vector* result) override { + absl::Status GetChildren(const string& dir, TransactionToken* token, + std::vector* result) override { return calls_.ConsumeNextCall("GetChildren"); } - Status GetMatchingPaths(const string& dir, TransactionToken* token, - std::vector* result) override { + absl::Status GetMatchingPaths(const string& dir, TransactionToken* token, + std::vector* result) override { return calls_.ConsumeNextCall("GetMatchingPaths"); } - Status Stat(const string& fname, TransactionToken* token, - FileStatistics* stat) override { + absl::Status Stat(const string& fname, TransactionToken* token, + FileStatistics* stat) override { return calls_.ConsumeNextCall("Stat"); } - Status DeleteFile(const string& fname, TransactionToken* token) override { + absl::Status DeleteFile(const string& fname, + TransactionToken* token) override { return calls_.ConsumeNextCall("DeleteFile"); } - Status CreateDir(const string& dirname, TransactionToken* token) override { + absl::Status CreateDir(const string& dirname, + TransactionToken* token) override { return calls_.ConsumeNextCall("CreateDir"); } - Status DeleteDir(const string& dirname, TransactionToken* token) override { + absl::Status DeleteDir(const string& dirname, + TransactionToken* token) override { return calls_.ConsumeNextCall("DeleteDir"); } - Status GetFileSize(const string& fname, TransactionToken* token, - uint64* file_size) override { + absl::Status GetFileSize(const string& fname, TransactionToken* token, + uint64* file_size) override { return calls_.ConsumeNextCall("GetFileSize"); } - Status RenameFile(const string& src, const string& target, - TransactionToken* token) override { + absl::Status RenameFile(const string& src, const string& target, + TransactionToken* token) override { return calls_.ConsumeNextCall("RenameFile"); } - Status IsDirectory(const string& dirname, TransactionToken* token) override { + absl::Status IsDirectory(const string& dirname, + TransactionToken* token) override { return calls_.ConsumeNextCall("IsDirectory"); } - Status DeleteRecursively(const string& dirname, TransactionToken* token, - int64_t* undeleted_files, - int64_t* undeleted_dirs) override { + absl::Status DeleteRecursively(const string& dirname, TransactionToken* token, + int64_t* undeleted_files, + int64_t* undeleted_dirs) override { return calls_.ConsumeNextCall("DeleteRecursively"); } @@ -193,14 +199,15 @@ class MockFileSystem : public FileSystem { TEST(RetryingFileSystemTest, NewRandomAccessFile_ImmediateSuccess) { // Configure the mock base random access file. 
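MockFileSystem above, and the mock file classes before it, are driven by a scripted call sequence: a vector of (method name, status) tuples consumed in order. A small sketch of that pattern, assuming nothing beyond absl::Status (`CallScript` is an illustrative name, not the test helper itself):

```cpp
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "absl/status/status.h"

// The mock is primed with an ordered list of (method name, status) pairs and
// hands back the next scripted status each time the named method is invoked.
class CallScript {
 public:
  using Expected = std::vector<std::tuple<std::string, absl::Status>>;

  explicit CallScript(Expected calls) : calls_(std::move(calls)) {}

  absl::Status Consume(const std::string& method) {
    if (calls_.empty() || std::get<0>(calls_.front()) != method) {
      return absl::InternalError("unexpected call: " + method);
    }
    absl::Status next = std::get<1>(calls_.front());
    calls_.erase(calls_.begin());
    return next;
  }

 private:
  Expected calls_;
};
```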
- ExpectedCalls expected_file_calls({std::make_tuple("Name", OkStatus()), - std::make_tuple("Read", OkStatus())}); + ExpectedCalls expected_file_calls( + {std::make_tuple("Name", absl::OkStatus()), + std::make_tuple("Read", absl::OkStatus())}); std::unique_ptr base_file( new MockRandomAccessFile(expected_file_calls)); // Configure the mock base file system. ExpectedCalls expected_fs_calls( - {std::make_tuple("NewRandomAccessFile", OkStatus())}); + {std::make_tuple("NewRandomAccessFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); base_fs->random_access_file_to_return = std::move(base_file); @@ -226,13 +233,13 @@ TEST(RetryingFileSystemTest, NewRandomAccessFile_SuccessWith3rdTry) { ExpectedCalls expected_file_calls( {std::make_tuple("Read", errors::Unavailable("Something is wrong")), std::make_tuple("Read", errors::Unavailable("Wrong again")), - std::make_tuple("Read", OkStatus())}); + std::make_tuple("Read", absl::OkStatus())}); std::unique_ptr base_file( new MockRandomAccessFile(expected_file_calls)); // Configure the mock base file system. ExpectedCalls expected_fs_calls( - {std::make_tuple("NewRandomAccessFile", OkStatus())}); + {std::make_tuple("NewRandomAccessFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); base_fs->random_access_file_to_return = std::move(base_file); @@ -258,7 +265,7 @@ TEST(RetryingFileSystemTest, NewRandomAccessFile_AllRetriesFailed) { // Configure the mock base file system. ExpectedCalls expected_fs_calls( - {std::make_tuple("NewRandomAccessFile", OkStatus())}); + {std::make_tuple("NewRandomAccessFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); base_fs->random_access_file_to_return = std::move(base_file); @@ -289,7 +296,7 @@ TEST(RetryingFileSystemTest, NewRandomAccessFile_NoRetriesForSomeErrors) { // Configure the mock base file system. ExpectedCalls expected_fs_calls( - {std::make_tuple("NewRandomAccessFile", OkStatus())}); + {std::make_tuple("NewRandomAccessFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); base_fs->random_access_file_to_return = std::move(base_file); @@ -310,15 +317,16 @@ TEST(RetryingFileSystemTest, NewRandomAccessFile_NoRetriesForSomeErrors) { TEST(RetryingFileSystemTest, NewWritableFile_ImmediateSuccess) { // Configure the mock base random access file. - ExpectedCalls expected_file_calls({std::make_tuple("Name", OkStatus()), - std::make_tuple("Sync", OkStatus()), - std::make_tuple("Close", OkStatus())}); + ExpectedCalls expected_file_calls( + {std::make_tuple("Name", absl::OkStatus()), + std::make_tuple("Sync", absl::OkStatus()), + std::make_tuple("Close", absl::OkStatus())}); std::unique_ptr base_file( new MockWritableFile(expected_file_calls)); // Configure the mock base file system. 
ExpectedCalls expected_fs_calls( - {std::make_tuple("NewWritableFile", OkStatus())}); + {std::make_tuple("NewWritableFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); base_fs->writable_file_to_return = std::move(base_file); @@ -342,14 +350,14 @@ TEST(RetryingFileSystemTest, NewWritableFile_SuccessWith3rdTry) { ExpectedCalls expected_file_calls( {std::make_tuple("Sync", errors::Unavailable("Something is wrong")), std::make_tuple("Sync", errors::Unavailable("Something is wrong again")), - std::make_tuple("Sync", OkStatus()), - std::make_tuple("Close", OkStatus())}); + std::make_tuple("Sync", absl::OkStatus()), + std::make_tuple("Close", absl::OkStatus())}); std::unique_ptr base_file( new MockWritableFile(expected_file_calls)); // Configure the mock base file system. ExpectedCalls expected_fs_calls( - {std::make_tuple("NewWritableFile", OkStatus())}); + {std::make_tuple("NewWritableFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); base_fs->writable_file_to_return = std::move(base_file); @@ -370,13 +378,13 @@ TEST(RetryingFileSystemTest, NewWritableFile_SuccessWith3rdTry_ViaDestructor) { {std::make_tuple("Close", errors::Unavailable("Something is wrong")), std::make_tuple("Close", errors::Unavailable("Something is wrong again")), - std::make_tuple("Close", OkStatus())}); + std::make_tuple("Close", absl::OkStatus())}); std::unique_ptr base_file( new MockWritableFile(expected_file_calls)); // Configure the mock base file system. ExpectedCalls expected_fs_calls( - {std::make_tuple("NewWritableFile", OkStatus())}); + {std::make_tuple("NewWritableFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); base_fs->writable_file_to_return = std::move(base_file); @@ -395,14 +403,14 @@ TEST(RetryingFileSystemTest, NewAppendableFile_SuccessWith3rdTry) { ExpectedCalls expected_file_calls( {std::make_tuple("Sync", errors::Unavailable("Something is wrong")), std::make_tuple("Sync", errors::Unavailable("Something is wrong again")), - std::make_tuple("Sync", OkStatus()), - std::make_tuple("Close", OkStatus())}); + std::make_tuple("Sync", absl::OkStatus()), + std::make_tuple("Close", absl::OkStatus())}); std::unique_ptr base_file( new MockWritableFile(expected_file_calls)); // Configure the mock base file system. ExpectedCalls expected_fs_calls( - {std::make_tuple("NewAppendableFile", OkStatus())}); + {std::make_tuple("NewAppendableFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); base_fs->writable_file_to_return = std::move(base_file); @@ -420,13 +428,13 @@ TEST(RetryingFileSystemTest, NewAppendableFile_SuccessWith3rdTry) { TEST(RetryingFileSystemTest, NewWritableFile_AllRetriesFailed) { // Configure the mock base random access file. ExpectedCalls expected_file_calls = CreateRetriableErrors("Sync", 11); - expected_file_calls.emplace_back(std::make_tuple("Close", OkStatus())); + expected_file_calls.emplace_back(std::make_tuple("Close", absl::OkStatus())); std::unique_ptr base_file( new MockWritableFile(expected_file_calls)); // Configure the mock base file system. 
ExpectedCalls expected_fs_calls( - {std::make_tuple("NewWritableFile", OkStatus())}); + {std::make_tuple("NewWritableFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); base_fs->writable_file_to_return = std::move(base_file); @@ -448,7 +456,7 @@ TEST(RetryingFileSystemTest, ExpectedCalls expected_fs_calls( {std::make_tuple("NewReadOnlyMemoryRegionFromFile", errors::Unavailable("Something is wrong")), - std::make_tuple("NewReadOnlyMemoryRegionFromFile", OkStatus())}); + std::make_tuple("NewReadOnlyMemoryRegionFromFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -478,7 +486,7 @@ TEST(RetryingFileSystemTest, GetChildren_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("GetChildren", errors::Unavailable("Something is wrong")), - std::make_tuple("GetChildren", OkStatus())}); + std::make_tuple("GetChildren", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -505,7 +513,7 @@ TEST(RetryingFileSystemTest, GetMatchingPaths_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("GetMatchingPaths", errors::Unavailable("Something is wrong")), - std::make_tuple("GetMatchingPaths", OkStatus())}); + std::make_tuple("GetMatchingPaths", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -532,7 +540,7 @@ TEST(RetryingFileSystemTest, GetMatchingPaths_AllRetriesFailed) { TEST(RetryingFileSystemTest, DeleteFile_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("DeleteFile", errors::Unavailable("Something is wrong")), - std::make_tuple("DeleteFile", OkStatus())}); + std::make_tuple("DeleteFile", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -556,7 +564,7 @@ TEST(RetryingFileSystemTest, DeleteFile_AllRetriesFailed) { TEST(RetryingFileSystemTest, CreateDir_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("CreateDir", errors::Unavailable("Something is wrong")), - std::make_tuple("CreateDir", OkStatus())}); + std::make_tuple("CreateDir", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -580,7 +588,7 @@ TEST(RetryingFileSystemTest, CreateDir_AllRetriesFailed) { TEST(RetryingFileSystemTest, DeleteDir_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("DeleteDir", errors::Unavailable("Something is wrong")), - std::make_tuple("DeleteDir", OkStatus())}); + std::make_tuple("DeleteDir", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -605,7 +613,7 @@ TEST(RetryingFileSystemTest, GetFileSize_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("GetFileSize", errors::Unavailable("Something is wrong")), - std::make_tuple("GetFileSize", OkStatus())}); + std::make_tuple("GetFileSize", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -631,7 +639,7 @@ TEST(RetryingFileSystemTest, GetFileSize_AllRetriesFailed) { TEST(RetryingFileSystemTest, RenameFile_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("RenameFile", errors::Unavailable("Something is wrong")), - std::make_tuple("RenameFile", OkStatus())}); + std::make_tuple("RenameFile", absl::OkStatus())}); std::unique_ptr base_fs( new 
MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -655,7 +663,7 @@ TEST(RetryingFileSystemTest, RenameFile_AllRetriesFailed) { TEST(RetryingFileSystemTest, Stat_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("Stat", errors::Unavailable("Something is wrong")), - std::make_tuple("Stat", OkStatus())}); + std::make_tuple("Stat", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -693,7 +701,7 @@ TEST(RetryingFileSystemTest, FileExists_AllRetriesFailed) { TEST(RetryingFileSystemTest, FileExists_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("FileExists", errors::Unavailable("Something is wrong")), - std::make_tuple("FileExists", OkStatus())}); + std::make_tuple("FileExists", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -706,7 +714,7 @@ TEST(RetryingFileSystemTest, IsDirectory_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("IsDirectory", errors::Unavailable("Something is wrong")), - std::make_tuple("IsDirectory", OkStatus())}); + std::make_tuple("IsDirectory", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( @@ -731,7 +739,7 @@ TEST(RetryingFileSystemTest, DeleteRecursively_SuccessWith2ndTry) { ExpectedCalls expected_fs_calls( {std::make_tuple("DeleteRecursively", errors::Unavailable("Something is wrong")), - std::make_tuple("DeleteRecursively", OkStatus())}); + std::make_tuple("DeleteRecursively", absl::OkStatus())}); std::unique_ptr base_fs( new MockFileSystem(expected_fs_calls)); RetryingFileSystem fs( diff --git a/third_party/xla/third_party/tsl/tsl/platform/retrying_utils.cc b/third_party/xla/third_party/tsl/tsl/platform/retrying_utils.cc index 3beb3f46110a24..14459e93b61ef3 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/retrying_utils.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/retrying_utils.cc @@ -54,8 +54,8 @@ double GenerateUniformRandomNumberBetween(double a, double b) { } // namespace -Status RetryingUtils::CallWithRetries(const std::function& f, - const RetryConfig& config) { +absl::Status RetryingUtils::CallWithRetries( + const std::function& f, const RetryConfig& config) { return CallWithRetries( f, [](int64_t micros) { @@ -64,8 +64,8 @@ Status RetryingUtils::CallWithRetries(const std::function& f, config); } -Status RetryingUtils::CallWithRetries( - const std::function& f, +absl::Status RetryingUtils::CallWithRetries( + const std::function& f, const std::function& sleep_usec, const RetryConfig& config) { int retries = 0; while (true) { @@ -76,7 +76,7 @@ Status RetryingUtils::CallWithRetries( if (retries >= config.max_retries) { // Return AbortedError, so that it doesn't get retried again somewhere // at a higher level. 
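The CallWithRetries loop in this hunk retries retriable errors with randomized exponential backoff and, as the comment notes, converts the exhausted case into kAborted on the next line so it is not retried again higher up. A self-contained sketch of that shape; the backoff constants, jitter formula, and the kUnavailable-only rule here are illustrative, not the TSL values:

```cpp
#include <cmath>
#include <cstdint>
#include <functional>
#include <random>

#include "absl/status/status.h"
#include "absl/strings/str_cat.h"

absl::Status CallWithRetriesSketch(
    const std::function<absl::Status()>& f,
    const std::function<void(int64_t)>& sleep_usec, int max_retries) {
  std::mt19937 rng{std::random_device{}()};
  int retries = 0;
  while (true) {
    absl::Status status = f();
    // Success, and errors we consider permanent, are returned immediately.
    if (status.ok() || status.code() != absl::StatusCode::kUnavailable) {
      return status;
    }
    if (retries >= max_retries) {
      // Convert to kAborted so callers further up do not retry again.
      return absl::Status(
          absl::StatusCode::kAborted,
          absl::StrCat("All ", max_retries,
                       " retry attempts failed: ", status.message()));
    }
    // Exponential backoff with jitter: base delay doubles on every attempt.
    const double base_s = 0.5 * std::pow(2.0, retries);
    std::uniform_real_distribution<double> jitter(0.5 * base_s, base_s);
    sleep_usec(static_cast<int64_t>(jitter(rng) * 1e6));
    ++retries;
  }
}
```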
- return Status( + return absl::Status( absl::StatusCode::kAborted, strings::StrCat( "All ", config.max_retries, @@ -98,14 +98,15 @@ Status RetryingUtils::CallWithRetries( } } -Status RetryingUtils::DeleteWithRetries( - const std::function& delete_func, const RetryConfig& config) { +absl::Status RetryingUtils::DeleteWithRetries( + const std::function& delete_func, + const RetryConfig& config) { bool is_retried = false; return RetryingUtils::CallWithRetries( [delete_func, &is_retried]() { - const Status status = delete_func(); + const absl::Status status = delete_func(); if (is_retried && status.code() == error::NOT_FOUND) { - return OkStatus(); + return absl::OkStatus(); } is_retried = true; return status; diff --git a/third_party/xla/third_party/tsl/tsl/platform/retrying_utils.h b/third_party/xla/third_party/tsl/tsl/platform/retrying_utils.h index 3252da2637c4d2..470b6a8f183412 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/retrying_utils.h +++ b/third_party/xla/third_party/tsl/tsl/platform/retrying_utils.h @@ -51,20 +51,22 @@ class RetryingUtils { /// If initial_delay_microseconds is zero, no delays will be made between /// retries. /// If all retries failed, returns the last error status. - static Status CallWithRetries(const std::function& f, - const RetryConfig& config); + static absl::Status CallWithRetries(const std::function& f, + const RetryConfig& config); /// sleep_usec is a function that sleeps for the given number of microseconds. - static Status CallWithRetries(const std::function& f, - const std::function& sleep_usec, - const RetryConfig& config); + static absl::Status CallWithRetries( + const std::function& f, + const std::function& sleep_usec, + const RetryConfig& config); /// \brief A retrying wrapper for a function that deletes a resource. /// /// The function takes care of the scenario when a delete operation /// returns a failure but succeeds under the hood: if a retry returns /// NOT_FOUND, the whole operation is considered a success. 
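A sketch of the semantics described in the comment above: once at least one attempt has been made, a NOT_FOUND from a later attempt is treated as proof that an earlier "failed" attempt actually deleted the resource. The plain loop here stands in for the delegation to CallWithRetries:

```cpp
#include <functional>

#include "absl/status/status.h"

absl::Status DeleteWithRetriesSketch(
    const std::function<absl::Status()>& delete_func, int max_attempts) {
  bool is_retried = false;
  absl::Status status;
  for (int attempt = 0; attempt < max_attempts; ++attempt) {
    status = delete_func();
    if (is_retried && absl::IsNotFound(status)) {
      // A previous attempt must have deleted the resource already.
      return absl::OkStatus();
    }
    is_retried = true;
    // Only transient failures loop again; the first NOT_FOUND, and every
    // other error, is returned as-is.
    if (status.code() != absl::StatusCode::kUnavailable) return status;
  }
  return status;
}
```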
- static Status DeleteWithRetries(const std::function& delete_func, - const RetryConfig& config); + static absl::Status DeleteWithRetries( + const std::function& delete_func, + const RetryConfig& config); }; // Given the total number of retries attempted, returns a randomized duration of diff --git a/third_party/xla/third_party/tsl/tsl/platform/retrying_utils_test.cc b/third_party/xla/third_party/tsl/tsl/platform/retrying_utils_test.cc index c0b3ad7b651e16..5d55ec31cc2f20 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/retrying_utils_test.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/retrying_utils_test.cc @@ -32,7 +32,9 @@ TEST(RetryingUtilsTest, CallWithRetries_RetryDelays) { std::function sleep = [&requested_delays](int64_t delay) { requested_delays.emplace_back(delay / 1000000.0); }; - std::function f = []() { return errors::Unavailable("Failed."); }; + std::function f = []() { + return errors::Unavailable("Failed."); + }; const auto& status = RetryingUtils::CallWithRetries( f, sleep, RetryConfig(500000 /* init_delay_time_us */)); @@ -58,9 +60,9 @@ TEST(RetryingUtilsTest, CallWithRetries_RetryDelays) { } TEST(RetryingUtilsTest, CallWithRetries_NotFoundIsNotRetried) { - std::vector results( + std::vector results( {errors::Unavailable("Failed."), errors::NotFound("Not found.")}); - std::function f = [&results]() { + std::function f = [&results]() { auto result = results[0]; results.erase(results.begin()); return result; @@ -70,11 +72,11 @@ TEST(RetryingUtilsTest, CallWithRetries_NotFoundIsNotRetried) { } TEST(RetryingUtilsTest, CallWithRetries_ImmediateSuccess) { - std::vector results({OkStatus()}); + std::vector results({absl::OkStatus()}); std::function sleep = [](int64_t delay) { ADD_FAILURE() << "Unexpected call to sleep."; }; - std::function f = [&results]() { + std::function f = [&results]() { auto result = results[0]; results.erase(results.begin()); return result; @@ -84,10 +86,10 @@ TEST(RetryingUtilsTest, CallWithRetries_ImmediateSuccess) { } TEST(RetryingUtilsTest, CallWithRetries_EventualSuccess) { - std::vector results({errors::Unavailable("Failed."), - errors::Unavailable("Failed again."), - OkStatus()}); - std::function f = [&results]() { + std::vector results({errors::Unavailable("Failed."), + errors::Unavailable("Failed again."), + absl::OkStatus()}); + std::function f = [&results]() { auto result = results[0]; results.erase(results.begin()); return result; @@ -97,7 +99,7 @@ TEST(RetryingUtilsTest, CallWithRetries_EventualSuccess) { } TEST(RetryingUtilsTest, DeleteWithRetries_ImmediateSuccess) { - std::vector delete_results({OkStatus()}); + std::vector delete_results({absl::OkStatus()}); const auto delete_func = [&delete_results]() { auto result = delete_results[0]; delete_results.erase(delete_results.begin()); @@ -108,7 +110,8 @@ TEST(RetryingUtilsTest, DeleteWithRetries_ImmediateSuccess) { } TEST(RetryingUtilsTest, DeleteWithRetries_EventualSuccess) { - std::vector delete_results({errors::Unavailable(""), OkStatus()}); + std::vector delete_results( + {errors::Unavailable(""), absl::OkStatus()}); const auto delete_func = [&delete_results]() { auto result = delete_results[0]; delete_results.erase(delete_results.begin()); @@ -119,7 +122,7 @@ TEST(RetryingUtilsTest, DeleteWithRetries_EventualSuccess) { } TEST(RetryingUtilsTest, DeleteWithRetries_PermissionDeniedNotRetried) { - std::vector delete_results( + std::vector delete_results( {errors::Unavailable(""), errors::PermissionDenied("")}); const auto delete_func = [&delete_results]() { auto result = 
delete_results[0]; @@ -131,7 +134,7 @@ TEST(RetryingUtilsTest, DeleteWithRetries_PermissionDeniedNotRetried) { } TEST(RetryingUtilsTest, DeleteWithRetries_SuccessThroughFileNotFound) { - std::vector delete_results( + std::vector delete_results( {errors::Unavailable(""), errors::NotFound("")}); const auto delete_func = [&delete_results]() { auto result = delete_results[0]; @@ -143,7 +146,7 @@ TEST(RetryingUtilsTest, DeleteWithRetries_SuccessThroughFileNotFound) { } TEST(RetryingUtilsTest, DeleteWithRetries_FirstNotFoundReturnedAsIs) { - std::vector delete_results({errors::NotFound("")}); + std::vector delete_results({errors::NotFound("")}); const auto delete_func = [&delete_results]() { auto result = delete_results[0]; delete_results.erase(delete_results.begin()); diff --git a/third_party/xla/third_party/tsl/tsl/platform/status.cc b/third_party/xla/third_party/tsl/tsl/platform/status.cc index 3f9dd1ecb58427..4256bd2f2a44e9 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/status.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/status.cc @@ -112,7 +112,7 @@ namespace errors { static constexpr const char kStackTraceProtoUrl[] = "type.googleapis.com/tensorflow.StackTracePayload"; -void SetStackTrace(::tsl::Status& status, std::vector stack_trace) { +void SetStackTrace(absl::Status& status, std::vector stack_trace) { // Given the StackFrame fields are (a) line number (b) filename (c) function // name, we can safely assume that there is no `\n` in there. // Thus, we can serialize as strings using a simple new line delimiter. @@ -134,7 +134,7 @@ void SetStackTrace(::tsl::Status& status, std::vector stack_trace) { absl::Cord(absl::StrJoin(items, "\n"))); } -std::vector GetStackTrace(const ::tsl::Status& status) { +std::vector GetStackTrace(const absl::Status& status) { std::vector stack_trace; absl::optional maybe_serialized_payload = status.GetPayload(kStackTraceProtoUrl); @@ -163,7 +163,7 @@ const char* NullTerminatedMessage(const absl::Status& status) { } #endif -std::string* TfCheckOpHelperOutOfLine(const ::tsl::Status& v, const char* msg) { +std::string* TfCheckOpHelperOutOfLine(const absl::Status& v, const char* msg) { std::string r("Non-OK-status: "); r += msg; r += " status: "; @@ -174,8 +174,8 @@ std::string* TfCheckOpHelperOutOfLine(const ::tsl::Status& v, const char* msg) { StatusGroup::StatusGroup() {} -StatusGroup::StatusGroup(std::initializer_list statuses) { - for (const Status& s : statuses) { +StatusGroup::StatusGroup(std::initializer_list statuses) { + for (const absl::Status& s : statuses) { Update(s); } } @@ -183,11 +183,11 @@ StatusGroup::StatusGroup(std::initializer_list statuses) { static constexpr const char kDerivedStatusProtoUrl[] = "type.googleapis.com/tensorflow.DerivedStatus"; -Status StatusGroup::MakeDerived(const Status& s) { +absl::Status StatusGroup::MakeDerived(const absl::Status& s) { if (IsDerived(s)) { return s; } else { - Status derived(s); + absl::Status derived(s); // TODO(b/200167936): Serialize an instance of DerivedStatus proto instead // of using the string directly. The string is never used so it is not // causing any issues at the moment. 
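SetStackTrace and GetStackTrace above ride on the generic payload mechanism of absl::Status: serialized data attached under a type URL and read back later. A minimal sketch of that mechanism; the type URL below is a placeholder, not the real StackTracePayload URL:

```cpp
#include <string>

#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"

// Placeholder type URL; the real code uses its own StackTracePayload URL.
constexpr char kExamplePayloadUrl[] = "type.googleapis.com/example.Payload";

void AttachPayload(absl::Status& status, const std::string& serialized) {
  status.SetPayload(kExamplePayloadUrl, absl::Cord(serialized));
}

absl::optional<std::string> ReadPayload(const absl::Status& status) {
  absl::optional<absl::Cord> payload = status.GetPayload(kExamplePayloadUrl);
  if (!payload.has_value()) return absl::nullopt;
  return std::string(*payload);
}
```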
@@ -196,7 +196,7 @@ Status StatusGroup::MakeDerived(const Status& s) { } } -bool StatusGroup::IsDerived(const Status& s) { +bool StatusGroup::IsDerived(const absl::Status& s) { return s.GetPayload(kDerivedStatusProtoUrl).has_value(); } @@ -204,7 +204,7 @@ void StatusGroup::ConfigureLogHistory() { StatusLogSink::GetInstance()->enable(); } -void StatusGroup::Update(const Status& s) { +void StatusGroup::Update(const absl::Status& s) { if (s.ok()) { ++num_ok_; } else { @@ -241,25 +241,26 @@ std::unordered_map StatusGroup::GetPayloads() const { return payloads; } -Status MakeStatus(absl::StatusCode code, absl::string_view message, - const std::unordered_map& payloads) { - Status status(code, message); +absl::Status MakeStatus( + absl::StatusCode code, absl::string_view message, + const std::unordered_map& payloads) { + absl::Status status(code, message); for (const auto& payload : payloads) { status.SetPayload(payload.first, payload.second); } return status; } -std::string MakeString(const Status& status) { +std::string MakeString(const absl::Status& status) { return absl::StrCat(absl::StatusCodeToString(status.code()), ": ", status.message()); } // Summarize all the status objects in the StatusGroup. This is used when // individual Status objects in the StatusGroup are not already summarized. -Status StatusGroup::as_summary_status() const { +absl::Status StatusGroup::as_summary_status() const { if (ok_) { - return OkStatus(); + return absl::OkStatus(); } // Gather recent logs as a string @@ -322,9 +323,9 @@ Status StatusGroup::as_summary_status() const { // Concatenate all the status objects in the StatusGroup. This is used when // individual Status objects in the StatusGroup are already summarized Status. -Status StatusGroup::as_concatenated_status() const { +absl::Status StatusGroup::as_concatenated_status() const { if (ok_) { - return OkStatus(); + return absl::OkStatus(); } // If only one root status is found, return it directly. diff --git a/third_party/xla/third_party/tsl/tsl/platform/status.h b/third_party/xla/third_party/tsl/tsl/platform/status.h index b9aca1e9285a11..84954ff485a48b 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/status.h +++ b/third_party/xla/third_party/tsl/tsl/platform/status.h @@ -133,9 +133,9 @@ inline const char* NullTerminatedMessage(const absl::Status& status) { // TODO(b/197552541) Move this namespace to errors.h. namespace errors { -void SetStackTrace(::tsl::Status& status, std::vector stack_trace); +void SetStackTrace(absl::Status& status, std::vector stack_trace); -std::vector GetStackTrace(const ::tsl::Status& status); +std::vector GetStackTrace(const absl::Status& status); } // namespace errors // Helper class to manage multiple child status values. @@ -144,12 +144,12 @@ class StatusGroup { StatusGroup(); // Constructor to form a StatusGroup from any N set of Status arguments. // Usage: StatusGroup({status_a, status_b, status_c}); - StatusGroup(std::initializer_list statuses); + StatusGroup(std::initializer_list statuses); // Utility function to mark a Status as derived. By marking derived status, // Derived status messages are ignored when reporting errors to end users. - static Status MakeDerived(const Status& s); - static bool IsDerived(const Status& s); + static absl::Status MakeDerived(const absl::Status& s); + static bool IsDerived(const absl::Status& s); // Enable warning and error log collection for appending to the aggregated // status. This function may be called more than once. 
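How the StatusGroup API shown in these hunks is typically used at a call site. This sketch assumes the tsl include paths used in this patch and only the constructor, MakeDerived, and as_summary_status signatures visible above; everything else is illustrative:

```cpp
#include <iostream>

#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"

int main() {
  absl::Status a = tsl::errors::Aborted("worker 0 failed");
  absl::Status b = tsl::errors::Aborted("worker 1 failed");
  // Derived statuses are de-emphasized when the group is summarized.
  absl::Status c =
      tsl::StatusGroup::MakeDerived(tsl::errors::Aborted("follow-on failure"));

  tsl::StatusGroup group({a, b, c});
  absl::Status summary = group.as_summary_status();
  std::cout << summary.ToString() << "\n";
  return summary.ok() ? 0 : 1;
}
```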
@@ -163,15 +163,15 @@ class StatusGroup { std::unordered_map GetPayloads() const; // Return a merged status with combined child status messages with a summary. - Status as_summary_status() const; + absl::Status as_summary_status() const; // Return a merged status with combined child status messages with // concatenation. - Status as_concatenated_status() const; + absl::Status as_concatenated_status() const; bool ok() const { return ok_; } // Augment this group with the child status `status`. - void Update(const Status& status); + void Update(const absl::Status& status); // Attach recent warning and error log messages void AttachLogMessages(); @@ -183,25 +183,24 @@ class StatusGroup { // Maintain a sorted collection of statuses. struct CompareStatus { - bool operator()(const Status& a, const Status& b) const { + bool operator()(const absl::Status& a, const absl::Status& b) const { return a.ToString() > b.ToString(); } }; // Using std::set instead of absl::btree_set to keep size for certain // dependent libraries under the limit. - std::set derived_; - std::set non_derived_; + std::set derived_; + std::set non_derived_; std::vector recent_logs_; // recent warning and error logs }; +typedef std::function StatusCallback; -typedef std::function StatusCallback; - -extern ::tsl::string* TfCheckOpHelperOutOfLine(const ::tsl::Status& v, +extern ::tsl::string* TfCheckOpHelperOutOfLine(const absl::Status& v, const char* msg); -inline ::tsl::string* TfCheckOpHelper(::tsl::Status v, const char* msg) { +inline ::tsl::string* TfCheckOpHelper(absl::Status v, const char* msg) { if (v.ok()) return nullptr; return TfCheckOpHelperOutOfLine(v, msg); } diff --git a/third_party/xla/third_party/tsl/tsl/platform/status_matchers.cc b/third_party/xla/third_party/tsl/tsl/platform/status_matchers.cc index 7546a7b20770ed..77422d564f8cda 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/status_matchers.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/status_matchers.cc @@ -40,7 +40,7 @@ void StatusIsMatcherCommonImpl::DescribeNegationTo(std::ostream* os) const { } bool StatusIsMatcherCommonImpl::MatchAndExplain( - const Status& status, + const absl::Status& status, ::testing::MatchResultListener* result_listener) const { ::testing::StringMatchResultListener inner_listener; diff --git a/third_party/xla/third_party/tsl/tsl/platform/status_matchers.h b/third_party/xla/third_party/tsl/tsl/platform/status_matchers.h index ee2144dca8a698..cb8eba40783093 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/status_matchers.h +++ b/third_party/xla/third_party/tsl/tsl/platform/status_matchers.h @@ -115,10 +115,12 @@ void PrintTo(const StatusOr& status_or, std::ostream* os) { namespace testing { namespace internal_status { -inline const Status& GetStatus(const Status& status) { return status; } +inline const absl::Status& GetStatus(const absl::Status& status) { + return status; +} template -inline const Status& GetStatus(const StatusOr& status) { +inline const absl::Status& GetStatus(const StatusOr& status) { return status.status(); } @@ -211,7 +213,7 @@ class StatusIsMatcherCommonImpl { void DescribeNegationTo(std::ostream* os) const; - bool MatchAndExplain(const Status& status, + bool MatchAndExplain(const absl::Status& status, ::testing::MatchResultListener* result_listener) const; private: diff --git a/third_party/xla/third_party/tsl/tsl/platform/status_matchers_test.cc b/third_party/xla/third_party/tsl/tsl/platform/status_matchers_test.cc index 70fc6191167c3b..ea0e0d489c24bf 100644 --- 
a/third_party/xla/third_party/tsl/tsl/platform/status_matchers_test.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/status_matchers_test.cc @@ -76,13 +76,13 @@ std::string ExplainMatch(const Matcher& matcher, const V& value) { } TEST(IsOkAndHoldsTest, MatchesValue) { - StatusOr status_or_message("Hello, world"); + absl::StatusOr status_or_message("Hello, world"); EXPECT_THAT(status_or_message, IsOkAndHolds("Hello, world")); EXPECT_THAT(status_or_message, IsOkAndHolds(HasSubstr("Hello,"))); } TEST(IsOkAndHoldsTest, MatchesContainer) { - StatusOr> status_or_messages = + absl::StatusOr> status_or_messages = std::vector{"Hello, world", "Hello, tf"}; EXPECT_THAT(status_or_messages, IsOkAndHolds(ElementsAre("Hello, world", "Hello, tf"))); @@ -91,23 +91,23 @@ TEST(IsOkAndHoldsTest, MatchesContainer) { } TEST(IsOkAndHoldsTest, DoesNotMatchStatus) { - StatusOr status_or_message = + absl::StatusOr status_or_message = errors::InvalidArgument("Invalid argument"); EXPECT_THAT(status_or_message, Not(IsOkAndHolds("Hello, world"))); } TEST(IsOkAndHoldsTest, DoesNotMatchValue) { - StatusOr status_or_message("Hello, tf"); + absl::StatusOr status_or_message("Hello, tf"); EXPECT_THAT(status_or_message, Not(IsOkAndHolds("Hello, world"))); } TEST(IsOkAndHoldsTest, DoesNotMatchContainer) { - StatusOr> status_or_container({1, 2, 3}); + absl::StatusOr> status_or_container({1, 2, 3}); EXPECT_THAT(status_or_container, Not(IsOkAndHolds(ElementsAre(4, 5, 6)))); } TEST(IsOkAndHoldsTest, DescribeExpectedValue) { - Matcher> is_ok_and_has_substr = + Matcher> is_ok_and_has_substr = IsOkAndHolds(HasSubstr("Hello")); EXPECT_EQ(Describe(is_ok_and_has_substr), "is OK and has a value that has substring \"Hello\""); @@ -116,20 +116,22 @@ TEST(IsOkAndHoldsTest, DescribeExpectedValue) { } TEST(IsOkAndHoldsTest, ExplainNotMatchingStatus) { - Matcher> is_ok_and_less_than = IsOkAndHolds(LessThan(100)); - StatusOr status = errors::Unknown("Unknown"); + Matcher> is_ok_and_less_than = + IsOkAndHolds(LessThan(100)); + absl::StatusOr status = errors::Unknown("Unknown"); EXPECT_THAT(ExplainMatch(is_ok_and_less_than, status), HasSubstr("which has status UNKNOWN: Unknown")); } TEST(IsOkAndHoldsTest, ExplainNotMatchingValue) { - Matcher> is_ok_and_less_than = IsOkAndHolds(LessThan(100)); + Matcher> is_ok_and_less_than = + IsOkAndHolds(LessThan(100)); EXPECT_EQ(ExplainMatch(is_ok_and_less_than, 120), "which contains value 120, which is 20 more than 100"); } TEST(IsOkAndHoldsTest, ExplainNotMatchingContainer) { - Matcher>> is_ok_and_less_than = + Matcher>> is_ok_and_less_than = IsOkAndHolds(ElementsAre(1, 2, 3)); std::vector actual{4, 5, 6}; EXPECT_THAT(ExplainMatch(is_ok_and_less_than, actual), @@ -137,20 +139,20 @@ TEST(IsOkAndHoldsTest, ExplainNotMatchingContainer) { } TEST(StatusIsTest, MatchesOK) { - EXPECT_THAT(OkStatus(), StatusIs(error::OK)); - StatusOr message("Hello, world"); + EXPECT_THAT(absl::OkStatus(), StatusIs(error::OK)); + absl::StatusOr message("Hello, world"); EXPECT_THAT(message, StatusIs(error::OK)); } TEST(StatusIsTest, DoesNotMatchOk) { EXPECT_THAT(errors::DeadlineExceeded("Deadline exceeded"), Not(StatusIs(error::OK))); - StatusOr status = errors::NotFound("Not found"); + absl::StatusOr status = errors::NotFound("Not found"); EXPECT_THAT(status, Not(StatusIs(error::OK))); } TEST(StatusIsTest, MatchesStatus) { - Status s = errors::Cancelled("Cancelled"); + absl::Status s = errors::Cancelled("Cancelled"); EXPECT_THAT(s, StatusIs(error::CANCELLED)); EXPECT_THAT(s, StatusIs(error::CANCELLED, "Cancelled")); 
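A compact illustration of the matchers exercised in this test file, written directly against absl::Status and absl::StatusOr; the test name and values are made up, while the matcher usage mirrors the cases above:

```cpp
#include <string>

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"

namespace {

using ::testing::HasSubstr;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;

TEST(StatusMatchersSketch, Basics) {
  absl::StatusOr<std::string> value("Hello, world");
  EXPECT_THAT(value, IsOk());
  EXPECT_THAT(value, IsOkAndHolds(HasSubstr("Hello")));

  absl::Status cancelled = tsl::errors::Cancelled("Cancelled");
  EXPECT_THAT(cancelled, StatusIs(tsl::error::CANCELLED, "Cancelled"));
}

}  // namespace
```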
EXPECT_THAT(s, StatusIs(_, "Cancelled")); @@ -161,7 +163,7 @@ TEST(StatusIsTest, MatchesStatus) { } TEST(StatusIsTest, StatusOrMatchesStatus) { - StatusOr s = errors::InvalidArgument("Invalid Argument"); + absl::StatusOr s = errors::InvalidArgument("Invalid Argument"); EXPECT_THAT(s, StatusIs(error::INVALID_ARGUMENT)); EXPECT_THAT(s, StatusIs(error::INVALID_ARGUMENT, "Invalid Argument")); EXPECT_THAT(s, StatusIs(_, "Invalid Argument")); @@ -172,7 +174,7 @@ TEST(StatusIsTest, StatusOrMatchesStatus) { } TEST(StatusIsTest, DoesNotMatchStatus) { - Status s = errors::Internal("Internal"); + absl::Status s = errors::Internal("Internal"); EXPECT_THAT(s, Not(StatusIs(error::FAILED_PRECONDITION))); EXPECT_THAT(s, Not(StatusIs(error::INTERNAL, "Failed Precondition"))); EXPECT_THAT(s, Not(StatusIs(_, "Failed Precondition"))); @@ -180,7 +182,7 @@ TEST(StatusIsTest, DoesNotMatchStatus) { } TEST(StatusIsTest, StatusOrDoesNotMatchStatus) { - StatusOr s = errors::FailedPrecondition("Failed Precondition"); + absl::StatusOr s = errors::FailedPrecondition("Failed Precondition"); EXPECT_THAT(s, Not(StatusIs(error::INTERNAL))); EXPECT_THAT(s, Not(StatusIs(error::FAILED_PRECONDITION, "Internal"))); EXPECT_THAT(s, Not(StatusIs(_, "Internal"))); @@ -188,7 +190,7 @@ TEST(StatusIsTest, StatusOrDoesNotMatchStatus) { } TEST(StatusIsTest, DescribeExpectedValue) { - Matcher status_is = + Matcher status_is = StatusIs(error::UNAVAILABLE, std::string("Unavailable")); EXPECT_EQ(Describe(status_is), "has a status code that is equal to UNAVAILABLE, " @@ -196,7 +198,7 @@ TEST(StatusIsTest, DescribeExpectedValue) { } TEST(StatusIsTest, DescribeNegatedExpectedValue) { - Matcher> status_is = + Matcher> status_is = StatusIs(error::ABORTED, std::string("Aborted")); EXPECT_EQ(DescribeNegation(status_is), "has a status code that isn't equal to ABORTED, " @@ -204,60 +206,61 @@ TEST(StatusIsTest, DescribeNegatedExpectedValue) { } TEST(StatusIsTest, ExplainNotMatchingErrorCode) { - Matcher status_is = StatusIs(error::NOT_FOUND, _); - const Status status = errors::AlreadyExists("Already exists"); + Matcher status_is = StatusIs(error::NOT_FOUND, _); + const absl::Status status = errors::AlreadyExists("Already exists"); EXPECT_EQ(ExplainMatch(status_is, status), "whose status code is wrong"); } TEST(StatusIsTest, ExplainNotMatchingErrorMessage) { - Matcher status_is = StatusIs(error::NOT_FOUND, "Not found"); - const Status status = errors::NotFound("Already exists"); + Matcher status_is = StatusIs(error::NOT_FOUND, "Not found"); + const absl::Status status = errors::NotFound("Already exists"); EXPECT_EQ(ExplainMatch(status_is, status), "whose error message is wrong"); } TEST(StatusIsTest, ExplainStatusOrNotMatchingErrorCode) { - Matcher> status_is = StatusIs(error::ALREADY_EXISTS, _); - const StatusOr status_or = errors::NotFound("Not found"); + Matcher> status_is = StatusIs(error::ALREADY_EXISTS, _); + const absl::StatusOr status_or = errors::NotFound("Not found"); EXPECT_EQ(ExplainMatch(status_is, status_or), "whose status code is wrong"); } TEST(StatusIsTest, ExplainStatusOrNotMatchingErrorMessage) { - Matcher> status_is = + Matcher> status_is = StatusIs(error::ALREADY_EXISTS, "Already exists"); - const StatusOr status_or = errors::AlreadyExists("Not found"); + const absl::StatusOr status_or = errors::AlreadyExists("Not found"); EXPECT_EQ(ExplainMatch(status_is, status_or), "whose error message is wrong"); } TEST(StatusIsTest, ExplainStatusOrHasValue) { - Matcher> status_is = + Matcher> status_is = StatusIs(error::RESOURCE_EXHAUSTED, 
"Resource exhausted"); - const StatusOr value = -1; + const absl::StatusOr value = -1; EXPECT_EQ(ExplainMatch(status_is, value), "whose status code is wrong"); } TEST(IsOkTest, MatchesOK) { - EXPECT_THAT(OkStatus(), IsOk()); - StatusOr message = std::string("Hello, world"); + EXPECT_THAT(absl::OkStatus(), IsOk()); + absl::StatusOr message = std::string("Hello, world"); EXPECT_THAT(message, IsOk()); } TEST(IsOkTest, DoesNotMatchOK) { EXPECT_THAT(errors::PermissionDenied("Permission denied"), Not(IsOk())); - StatusOr status = errors::Unauthenticated("Unauthenticated"); + absl::StatusOr status = + errors::Unauthenticated("Unauthenticated"); EXPECT_THAT(status, Not(IsOk())); } TEST(IsOkTest, DescribeExpectedValue) { - Matcher status_is_ok = IsOk(); + Matcher status_is_ok = IsOk(); EXPECT_EQ(Describe(status_is_ok), "is OK"); - Matcher> status_or_is_ok = IsOk(); + Matcher> status_or_is_ok = IsOk(); EXPECT_EQ(Describe(status_or_is_ok), "is OK"); } TEST(IsOkTest, DescribeNegatedExpectedValue) { - Matcher status_is_ok = IsOk(); + Matcher status_is_ok = IsOk(); EXPECT_EQ(DescribeNegation(status_is_ok), "is not OK"); - Matcher> status_or_is_ok = IsOk(); + Matcher> status_or_is_ok = IsOk(); EXPECT_EQ(DescribeNegation(status_or_is_ok), "is not OK"); } diff --git a/third_party/xla/third_party/tsl/tsl/platform/status_test.cc b/third_party/xla/third_party/tsl/tsl/platform/status_test.cc index bccf969e63b662..b95de35e181be1 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/status_test.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/status_test.cc @@ -34,7 +34,7 @@ using ::tsl::testing::IsOk; using ::tsl::testing::StatusIs; TEST(ToStringTest, PayloadsArePrinted) { - Status status = errors::Aborted("Aborted Error Message"); + absl::Status status = errors::Aborted("Aborted Error Message"); status.SetPayload("payload_key", absl::Cord(absl::StrFormat( "payload_value %c%c%c", 1, 2, 3))); @@ -44,7 +44,7 @@ TEST(ToStringTest, PayloadsArePrinted) { } TEST(ToStringTest, MatchesAbslStatus) { - Status status = errors::Aborted("Aborted Error Message"); + absl::Status status = errors::Aborted("Aborted Error Message"); status.SetPayload("payload_key", absl::Cord(absl::StrFormat( "payload_value %c%c%c", 1, 2, 3))); @@ -57,7 +57,7 @@ TEST(ToStringTest, MatchesAbslStatus) { } TEST(StackTrace, SerializeAndDeserializeCorrectly) { - Status status = errors::Aborted("Aborted Error Message"); + absl::Status status = errors::Aborted("Aborted Error Message"); std::vector stack_trace; stack_trace.push_back(StackFrame("filename_1", 33, "func_name_1")); stack_trace.push_back(StackFrame("filename_2", 66, "func_name_2")); @@ -72,11 +72,11 @@ TEST(StackTrace, SerializeAndDeserializeCorrectly) { } TEST(StatusGroupTest, DeterministicOrderWithoutPayloads) { - Status status_a = errors::Aborted("Status A"); - Status status_b = errors::Aborted("Status B"); - Status status_c = errors::Aborted("Status C"); + absl::Status status_a = errors::Aborted("Status A"); + absl::Status status_b = errors::Aborted("Status B"); + absl::Status status_c = errors::Aborted("Status C"); - Status combined = + absl::Status combined = StatusGroup({status_a, status_b, status_c}).as_summary_status(); EXPECT_EQ(combined, @@ -94,14 +94,14 @@ TEST(StatusGroupTest, DeterministicOrderWithoutPayloads) { } TEST(StatusGroupTest, DeterministicOrderWithPayloads) { - Status status_a = errors::Aborted("Status A"); + absl::Status status_a = errors::Aborted("Status A"); status_a.SetPayload("payload_key", absl::Cord("payload_value_a")); - Status status_b = 
errors::Aborted("Status B"); + absl::Status status_b = errors::Aborted("Status B"); status_b.SetPayload("payload_key", absl::Cord("payload_value_b")); - Status status_c = errors::Aborted("Status C"); + absl::Status status_c = errors::Aborted("Status C"); status_c.SetPayload("payload_key", absl::Cord("payload_value_c")); - Status combined = + absl::Status combined = StatusGroup({status_a, status_b, status_c}).as_summary_status(); ASSERT_TRUE(combined.GetPayload("payload_key").has_value()); std::string payload(combined.GetPayload("payload_key").value()); @@ -127,16 +127,16 @@ TEST(StatusGroupTest, DeterministicOrderWithPayloads) { } TEST(StatusGroupTest, PayloadsMergedProperly) { - Status status_a = errors::Aborted("Status A"); + absl::Status status_a = errors::Aborted("Status A"); status_a.SetPayload("payload_key_a", absl::Cord(std::string("payload_value_a"))); - Status status_b = errors::Aborted("Status B"); + absl::Status status_b = errors::Aborted("Status B"); status_b.SetPayload("payload_key_b", absl::Cord(std::string("payload_value_b"))); - Status status_c = errors::Aborted("Status C"); + absl::Status status_c = errors::Aborted("Status C"); status_c.SetPayload("payload_key_c", absl::Cord(std::string("payload_value_c"))); - Status derived_status_c = + absl::Status derived_status_c = StatusGroup::MakeDerived(errors::Aborted("Status C")); derived_status_c.SetPayload( "payload_key_c", absl::Cord(std::string("derived_payload_value_c"))); @@ -144,14 +144,14 @@ TEST(StatusGroupTest, PayloadsMergedProperly) { StatusGroup status_group({status_a, status_b, status_c, derived_status_c}); EXPECT_THAT(status_group.GetPayloads(), ::testing::SizeIs(3)); - Status combined = status_group.as_summary_status(); + absl::Status combined = status_group.as_summary_status(); EXPECT_EQ(combined.GetPayload("payload_key_a"), "payload_value_a"); EXPECT_EQ(combined.GetPayload("payload_key_b"), "payload_value_b"); EXPECT_EQ(combined.GetPayload("payload_key_c"), "payload_value_c"); } TEST(Status, ErrorStatusForEachPayloadIteratesOverAll) { - Status s(absl::StatusCode::kInternal, "Error message"); + absl::Status s(absl::StatusCode::kInternal, "Error message"); s.SetPayload("key1", absl::Cord("value1")); s.SetPayload("key2", absl::Cord("value2")); s.SetPayload("key3", absl::Cord("value3")); @@ -168,7 +168,7 @@ TEST(Status, ErrorStatusForEachPayloadIteratesOverAll) { } TEST(Status, OkStatusForEachPayloadNoIteration) { - Status s = OkStatus(); + absl::Status s = absl::OkStatus(); s.SetPayload("key1", absl::Cord("value1")); s.SetPayload("key2", absl::Cord("value2")); s.SetPayload("key3", absl::Cord("value3")); @@ -182,7 +182,7 @@ TEST(Status, OkStatusForEachPayloadNoIteration) { } TEST(Status, SaveOKStatusToProto) { - tensorflow::StatusProto status_proto = StatusToProto(OkStatus()); + tensorflow::StatusProto status_proto = StatusToProto(absl::OkStatus()); EXPECT_EQ(status_proto.code(), error::OK); EXPECT_THAT(status_proto.message(), IsEmpty()); } @@ -195,7 +195,7 @@ TEST(Status, SaveErrorStatusToProto) { } TEST(Status, SaveEmptyStatusToProto) { - tensorflow::StatusProto status_proto = StatusToProto(Status()); + tensorflow::StatusProto status_proto = StatusToProto(absl::Status()); EXPECT_EQ(status_proto.code(), error::OK); EXPECT_THAT(status_proto.message(), IsEmpty()); } diff --git a/third_party/xla/third_party/tsl/tsl/platform/status_to_from_proto.cc b/third_party/xla/third_party/tsl/tsl/platform/status_to_from_proto.cc index 250e2e1ed4f670..96ad290f92c71a 100644 --- 
a/third_party/xla/third_party/tsl/tsl/platform/status_to_from_proto.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/status_to_from_proto.cc @@ -22,7 +22,7 @@ limitations under the License. namespace tsl { -tensorflow::StatusProto StatusToProto(const Status& s) { +tensorflow::StatusProto StatusToProto(const absl::Status& s) { tensorflow::StatusProto status_proto; if (s.ok()) { return status_proto; @@ -36,13 +36,13 @@ tensorflow::StatusProto StatusToProto(const Status& s) { } #if defined(PLATFORM_GOOGLE) -Status StatusFromProto(const tensorflow::StatusProto& proto, - absl::SourceLocation loc) { +absl::Status StatusFromProto(const tensorflow::StatusProto& proto, + absl::SourceLocation loc) { if (proto.code() == tensorflow::error::OK) { - return OkStatus(); + return absl::OkStatus(); } - return Status(static_cast(proto.code()), proto.message(), - loc); + return absl::Status(static_cast(proto.code()), + proto.message(), loc); } #else Status StatusFromProto(const tensorflow::StatusProto& proto) { diff --git a/third_party/xla/third_party/tsl/tsl/platform/status_to_from_proto.h b/third_party/xla/third_party/tsl/tsl/platform/status_to_from_proto.h index 6abbe78dc0ef69..9891737f08159c 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/status_to_from_proto.h +++ b/third_party/xla/third_party/tsl/tsl/platform/status_to_from_proto.h @@ -28,11 +28,11 @@ namespace tsl { // Symbol not found: tensorflow11StatusProtoC1EPN6protobuf5ArenaEb // Converts a `Status` to a `StatusProto`. -tensorflow::StatusProto StatusToProto(const Status& s); +tensorflow::StatusProto StatusToProto(const absl::Status& s); #if defined(PLATFORM_GOOGLE) // Constructs a `Status` from a `StatusProto`. -Status StatusFromProto( +absl::Status StatusFromProto( const tensorflow::StatusProto& proto, absl::SourceLocation loc = absl::SourceLocation::current()); #else diff --git a/third_party/xla/third_party/tsl/tsl/platform/statusor_test.cc b/third_party/xla/third_party/tsl/tsl/platform/statusor_test.cc index 9cfc44de7167ed..fd0ee7886073b4 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/statusor_test.cc +++ b/third_party/xla/third_party/tsl/tsl/platform/statusor_test.cc @@ -67,7 +67,7 @@ class NoDefaultConstructor { static_assert(!std::is_default_constructible(), "Should not be default-constructible."); -StatusOr> ReturnUniquePtr() { +absl::StatusOr> ReturnUniquePtr() { // Uses implicit constructor from T&& return std::unique_ptr(new int(0)); } @@ -75,25 +75,25 @@ StatusOr> ReturnUniquePtr() { TEST(StatusOr, NullPointerStatusOr) { // As a very special case, null-plain-pointer StatusOr used to be an // error. Test that it no longer is. - StatusOr null_status(nullptr); + absl::StatusOr null_status(nullptr); EXPECT_TRUE(null_status.ok()); EXPECT_EQ(null_status.value(), nullptr); } TEST(StatusOr, TestNoDefaultConstructorInitialization) { // Explicitly initialize it with an error code. - StatusOr statusor(errors::Cancelled("")); + absl::StatusOr statusor(errors::Cancelled("")); EXPECT_FALSE(statusor.ok()); EXPECT_EQ(statusor.status().code(), absl::StatusCode::kCancelled); // Default construction of StatusOr initializes it with an UNKNOWN error code. 
- StatusOr statusor2; + absl::StatusOr statusor2; EXPECT_FALSE(statusor2.ok()); EXPECT_EQ(statusor2.status().code(), absl::StatusCode::kUnknown); } TEST(StatusOr, TestMoveOnlyInitialization) { - StatusOr> thing(ReturnUniquePtr()); + absl::StatusOr> thing(ReturnUniquePtr()); ASSERT_TRUE(thing.ok()); EXPECT_EQ(0, *thing.value()); int* previous = thing.value().get(); @@ -105,12 +105,12 @@ TEST(StatusOr, TestMoveOnlyInitialization) { } TEST(StatusOr, TestMoveOnlyStatusCtr) { - StatusOr> thing(errors::Cancelled("")); + absl::StatusOr> thing(errors::Cancelled("")); ASSERT_FALSE(thing.ok()); } TEST(StatusOr, TestMoveOnlyValueExtraction) { - StatusOr> thing(ReturnUniquePtr()); + absl::StatusOr> thing(ReturnUniquePtr()); ASSERT_TRUE(thing.ok()); std::unique_ptr ptr = std::move(thing).value(); EXPECT_EQ(0, *ptr); @@ -121,7 +121,7 @@ TEST(StatusOr, TestMoveOnlyValueExtraction) { } TEST(StatusOr, TestMoveOnlyConversion) { - StatusOr> const_thing(ReturnUniquePtr()); + absl::StatusOr> const_thing(ReturnUniquePtr()); EXPECT_TRUE(const_thing.ok()); EXPECT_EQ(0, *const_thing.value()); @@ -135,7 +135,7 @@ TEST(StatusOr, TestMoveOnlyConversion) { TEST(StatusOr, TestMoveOnlyVector) { // Sanity check that StatusOr works in vector. - std::vector>> vec; + std::vector>> vec; vec.push_back(ReturnUniquePtr()); vec.resize(2); auto another_vec = std::move(vec); @@ -144,11 +144,13 @@ TEST(StatusOr, TestMoveOnlyVector) { } TEST(StatusOr, TestMoveWithValuesAndErrors) { - StatusOr status_or(std::string(1000, '0')); - StatusOr value1(std::string(1000, '1')); - StatusOr value2(std::string(1000, '2')); - StatusOr error1(Status(absl::StatusCode::kUnknown, "error1")); - StatusOr error2(Status(absl::StatusCode::kUnknown, "error2")); + absl::StatusOr status_or(std::string(1000, '0')); + absl::StatusOr value1(std::string(1000, '1')); + absl::StatusOr value2(std::string(1000, '2')); + absl::StatusOr error1( + absl::Status(absl::StatusCode::kUnknown, "error1")); + absl::StatusOr error2( + absl::Status(absl::StatusCode::kUnknown, "error2")); ASSERT_TRUE(status_or.ok()); EXPECT_EQ(std::string(1000, '0'), status_or.value()); @@ -175,11 +177,13 @@ TEST(StatusOr, TestMoveWithValuesAndErrors) { } TEST(StatusOr, TestCopyWithValuesAndErrors) { - StatusOr status_or(std::string(1000, '0')); - StatusOr value1(std::string(1000, '1')); - StatusOr value2(std::string(1000, '2')); - StatusOr error1(Status(absl::StatusCode::kUnknown, "error1")); - StatusOr error2(Status(absl::StatusCode::kUnknown, "error2")); + absl::StatusOr status_or(std::string(1000, '0')); + absl::StatusOr value1(std::string(1000, '1')); + absl::StatusOr value2(std::string(1000, '2')); + absl::StatusOr error1( + absl::Status(absl::StatusCode::kUnknown, "error1")); + absl::StatusOr error2( + absl::Status(absl::StatusCode::kUnknown, "error2")); ASSERT_TRUE(status_or.ok()); EXPECT_EQ(std::string(1000, '0'), status_or.value()); @@ -212,13 +216,13 @@ TEST(StatusOr, TestCopyWithValuesAndErrors) { } TEST(StatusOr, TestDefaultCtor) { - StatusOr thing; + absl::StatusOr thing; EXPECT_FALSE(thing.ok()); EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown); } TEST(StatusOrDeathTest, TestDefaultCtorValue) { - StatusOr thing; + absl::StatusOr thing; #ifdef ABSL_HAVE_EXCEPTIONS try { thing.value(); @@ -230,7 +234,7 @@ TEST(StatusOrDeathTest, TestDefaultCtorValue) { EXPECT_DEATH(thing.value(), ""); #endif - const StatusOr thing2; + const absl::StatusOr thing2; #ifdef ABSL_HAVE_EXCEPTIONS try { thing.value(); @@ -244,93 +248,94 @@ TEST(StatusOrDeathTest, TestDefaultCtorValue) { } 
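// The StatusOr tests in this file change only the spelling to absl::StatusOr;
// the semantics they pin down are unchanged. A short sketch of the behaviors
// exercised around here, under the same assumptions as the tests above
// (MakeBoxedInt is a hypothetical stand-in for ReturnUniquePtr; <memory>,
// <utility>, and the absl status headers are assumed to be available):
absl::StatusOr<std::unique_ptr<int>> MakeBoxedInt() {
  return std::make_unique<int>(42);  // implicit construction from T&&
}

void StatusOrSemanticsSketch() {
  absl::StatusOr<int> pending;  // default-constructed: not OK, code kUnknown
  CHECK(!pending.ok());
  absl::StatusOr<int> cancelled(absl::Status(absl::StatusCode::kCancelled, ""));
  CHECK(!cancelled.ok());  // constructed from an error status: not OK
  absl::StatusOr<std::unique_ptr<int>> boxed = MakeBoxedInt();
  std::unique_ptr<int> owned = std::move(boxed).value();  // move-only payload
  CHECK_EQ(*owned, 42);
}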
TEST(StatusOr, TestStatusCtor) { - StatusOr thing(Status(absl::StatusCode::kCancelled, "")); + absl::StatusOr thing(absl::Status(absl::StatusCode::kCancelled, "")); EXPECT_FALSE(thing.ok()); EXPECT_EQ(thing.status().code(), absl::StatusCode::kCancelled); } TEST(StatusOr, TestValueCtor) { const int kI = 4; - const StatusOr thing(kI); + const absl::StatusOr thing(kI); EXPECT_TRUE(thing.ok()); EXPECT_EQ(kI, thing.value()); } TEST(StatusOr, TestCopyCtorStatusOk) { const int kI = 4; - const StatusOr original(kI); - const StatusOr copy(original); + const absl::StatusOr original(kI); + const absl::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); EXPECT_EQ(original.value(), copy.value()); } TEST(StatusOr, TestCopyCtorStatusNotOk) { - StatusOr original(Status(absl::StatusCode::kCancelled, "")); - StatusOr copy(original); + absl::StatusOr original(absl::Status(absl::StatusCode::kCancelled, "")); + absl::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); } TEST(StatusOr, TestCopyCtorNonAssignable) { const int kI = 4; CopyNoAssign value(kI); - StatusOr original(value); - StatusOr copy(original); + absl::StatusOr original(value); + absl::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); EXPECT_EQ(original.value().foo_, copy.value().foo_); } TEST(StatusOr, TestCopyCtorStatusOKConverting) { const int kI = 4; - StatusOr original(kI); - StatusOr copy(original); + absl::StatusOr original(kI); + absl::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); EXPECT_DOUBLE_EQ(original.value(), copy.value()); } TEST(StatusOr, TestCopyCtorStatusNotOkConverting) { - StatusOr original(Status(absl::StatusCode::kCancelled, "")); - StatusOr copy(original); + absl::StatusOr original(absl::Status(absl::StatusCode::kCancelled, "")); + absl::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); } TEST(StatusOr, TestAssignmentStatusOk) { const int kI = 4; - StatusOr source(kI); - StatusOr target; + absl::StatusOr source(kI); + absl::StatusOr target; target = source; EXPECT_EQ(target.status(), source.status()); EXPECT_EQ(source.value(), target.value()); } TEST(StatusOr, TestAssignmentStatusNotOk) { - StatusOr source(Status(absl::StatusCode::kCancelled, "")); - StatusOr target; + absl::StatusOr source(absl::Status(absl::StatusCode::kCancelled, "")); + absl::StatusOr target; target = source; EXPECT_EQ(target.status(), source.status()); } TEST(StatusOr, TestStatus) { - StatusOr good(4); + absl::StatusOr good(4); EXPECT_TRUE(good.ok()); - StatusOr bad(Status(absl::StatusCode::kCancelled, "")); + absl::StatusOr bad(absl::Status(absl::StatusCode::kCancelled, "")); EXPECT_FALSE(bad.ok()); - EXPECT_EQ(bad.status(), Status(absl::StatusCode::kCancelled, "")); + EXPECT_EQ(bad.status(), absl::Status(absl::StatusCode::kCancelled, "")); } TEST(StatusOr, TestValue) { const int kI = 4; - StatusOr thing(kI); + absl::StatusOr thing(kI); EXPECT_EQ(kI, thing.value()); } TEST(StatusOr, TestValueConst) { const int kI = 4; - const StatusOr thing(kI); + const absl::StatusOr thing(kI); EXPECT_EQ(kI, thing.value()); } TEST(StatusOrDeathTest, TestValueNotOk) { - StatusOr thing(Status(absl::StatusCode::kCancelled, "cancelled")); + absl::StatusOr thing( + absl::Status(absl::StatusCode::kCancelled, "cancelled")); #ifdef ABSL_HAVE_EXCEPTIONS try { thing.value(); @@ -344,7 +349,7 @@ TEST(StatusOrDeathTest, TestValueNotOk) { } TEST(StatusOrDeathTest, TestValueNotOkConst) { - const StatusOr thing(Status(absl::StatusCode::kUnknown, "")); + const absl::StatusOr 
thing(absl::Status(absl::StatusCode::kUnknown, "")); #ifdef ABSL_HAVE_EXCEPTIONS try { thing.value(); @@ -358,13 +363,13 @@ TEST(StatusOrDeathTest, TestValueNotOkConst) { } TEST(StatusOr, TestPointerDefaultCtor) { - StatusOr thing; + absl::StatusOr thing; EXPECT_FALSE(thing.ok()); EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown); } TEST(StatusOrDeathTest, TestPointerDefaultCtorValue) { - StatusOr thing; + absl::StatusOr thing; #ifdef ABSL_HAVE_EXCEPTIONS try { thing.value(); @@ -378,94 +383,97 @@ TEST(StatusOrDeathTest, TestPointerDefaultCtorValue) { } TEST(StatusOr, TestPointerStatusCtor) { - StatusOr thing(Status(absl::StatusCode::kCancelled, "")); + absl::StatusOr thing(absl::Status(absl::StatusCode::kCancelled, "")); EXPECT_FALSE(thing.ok()); - EXPECT_EQ(thing.status(), Status(absl::StatusCode::kCancelled, "")); + EXPECT_EQ(thing.status(), absl::Status(absl::StatusCode::kCancelled, "")); } TEST(StatusOr, TestPointerValueCtor) { const int kI = 4; - StatusOr thing(&kI); + absl::StatusOr thing(&kI); EXPECT_TRUE(thing.ok()); EXPECT_EQ(&kI, thing.value()); } TEST(StatusOr, TestPointerCopyCtorStatusOk) { const int kI = 0; - StatusOr original(&kI); - StatusOr copy(original); + absl::StatusOr original(&kI); + absl::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); EXPECT_EQ(original.value(), copy.value()); } TEST(StatusOr, TestPointerCopyCtorStatusNotOk) { - StatusOr original(Status(absl::StatusCode::kCancelled, "")); - StatusOr copy(original); + absl::StatusOr original(absl::Status(absl::StatusCode::kCancelled, "")); + absl::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); } TEST(StatusOr, TestPointerCopyCtorStatusOKConverting) { Derived derived; - StatusOr original(&derived); - StatusOr copy(original); + absl::StatusOr original(&derived); + absl::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); EXPECT_EQ(static_cast(original.value()), copy.value()); } TEST(StatusOr, TestPointerCopyCtorStatusNotOkConverting) { - StatusOr original(Status(absl::StatusCode::kCancelled, "")); - StatusOr copy(original); + absl::StatusOr original( + absl::Status(absl::StatusCode::kCancelled, "")); + absl::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); } TEST(StatusOr, TestPointerAssignmentStatusOk) { const int kI = 0; - StatusOr source(&kI); - StatusOr target; + absl::StatusOr source(&kI); + absl::StatusOr target; target = source; EXPECT_EQ(target.status(), source.status()); EXPECT_EQ(source.value(), target.value()); } TEST(StatusOr, TestPointerAssignmentStatusNotOk) { - StatusOr source(Status(absl::StatusCode::kCancelled, "")); - StatusOr target; + absl::StatusOr source(absl::Status(absl::StatusCode::kCancelled, "")); + absl::StatusOr target; target = source; EXPECT_EQ(target.status(), source.status()); } TEST(StatusOr, TestPointerStatus) { const int kI = 0; - StatusOr good(&kI); + absl::StatusOr good(&kI); EXPECT_TRUE(good.ok()); - StatusOr bad(Status(absl::StatusCode::kCancelled, "")); - EXPECT_EQ(bad.status(), Status(absl::StatusCode::kCancelled, "")); + absl::StatusOr bad( + absl::Status(absl::StatusCode::kCancelled, "")); + EXPECT_EQ(bad.status(), absl::Status(absl::StatusCode::kCancelled, "")); } TEST(StatusOr, TestPointerValue) { const int kI = 0; - StatusOr thing(&kI); + absl::StatusOr thing(&kI); EXPECT_EQ(&kI, thing.value()); } TEST(StatusOr, TestPointerValueConst) { const int kI = 0; - const StatusOr thing(&kI); + const absl::StatusOr thing(&kI); EXPECT_EQ(&kI, thing.value()); } TEST(StatusOr, 
TestArrowOperator) { - StatusOr> uptr = ReturnUniquePtr(); + absl::StatusOr> uptr = ReturnUniquePtr(); EXPECT_EQ(*uptr->get(), 0); } TEST(StatusOr, TestStarOperator) { - StatusOr> uptr = ReturnUniquePtr(); + absl::StatusOr> uptr = ReturnUniquePtr(); EXPECT_EQ(**uptr, 0); } TEST(StatusOr, TestStarOperatorDeath) { - StatusOr error(Status(absl::StatusCode::kCancelled, "cancelled")); + absl::StatusOr error( + absl::Status(absl::StatusCode::kCancelled, "cancelled")); EXPECT_DEATH(*error, "cancelled"); } @@ -478,7 +486,7 @@ TEST(StatusOr, TestStarOperatorDeath) { // v.reserve(v.capacity() + 10); // } -static StatusOr MakeStatus() { return 100; } +static absl::StatusOr MakeStatus() { return 100; } // A factory to help us benchmark the various factory styles. All of // the factory methods are marked as non-inlineable so as to more // accurately simulate calling a factory for which you do not have @@ -502,25 +510,25 @@ class BenchmarkFactory { // A more sophisticated factory, which returns a status to indicate // the result of the operation. The factory result is populated into // the user provided pointer result. - Status ArgumentFactory(T** result) TF_ATTRIBUTE_NOINLINE { + absl::Status ArgumentFactory(T** result) TF_ATTRIBUTE_NOINLINE { *result = value_; - return OkStatus(); + return absl::OkStatus(); } - Status ArgumentFactoryFail(T** result) TF_ATTRIBUTE_NOINLINE { + absl::Status ArgumentFactoryFail(T** result) TF_ATTRIBUTE_NOINLINE { *result = nullptr; - return Status(absl::StatusCode::kCancelled, ""); + return absl::Status(absl::StatusCode::kCancelled, ""); } - Status ArgumentFactoryFailShortMsg(T** result) TF_ATTRIBUTE_NOINLINE { + absl::Status ArgumentFactoryFailShortMsg(T** result) TF_ATTRIBUTE_NOINLINE { *result = nullptr; - return Status(absl::StatusCode::kInternal, ""); + return absl::Status(absl::StatusCode::kInternal, ""); } - Status ArgumentFactoryFailLongMsg(T** result) TF_ATTRIBUTE_NOINLINE { + absl::Status ArgumentFactoryFailLongMsg(T** result) TF_ATTRIBUTE_NOINLINE { *result = nullptr; - return Status(absl::StatusCode::kInternal, - "a big string of message junk that will never be read"); + return absl::Status(absl::StatusCode::kInternal, + "a big string of message junk that will never be read"); } // A factory that returns a StatusOr. 
If the factory operation @@ -531,16 +539,16 @@ class BenchmarkFactory { } StatusOr StatusOrFactoryFail() TF_ATTRIBUTE_NOINLINE { - return Status(absl::StatusCode::kCancelled, ""); + return absl::Status(absl::StatusCode::kCancelled, ""); } StatusOr StatusOrFactoryFailShortMsg() TF_ATTRIBUTE_NOINLINE { - return Status(absl::StatusCode::kInternal, ""); + return absl::Status(absl::StatusCode::kInternal, ""); } StatusOr StatusOrFactoryFailLongMsg() TF_ATTRIBUTE_NOINLINE { - return Status(absl::StatusCode::kInternal, - "a big string of message junk that will never be read"); + return absl::Status(absl::StatusCode::kInternal, + "a big string of message junk that will never be read"); } private: @@ -594,7 +602,7 @@ void BM_ArgumentFactory(::testing::benchmark::State& state) { BenchmarkFactory factory; for (auto s : state) { BenchmarkType* result = nullptr; - Status status = factory.ArgumentFactory(&result); + absl::Status status = factory.ArgumentFactory(&result); if (status.ok() && result != nullptr) { result->DoWork(); } @@ -607,7 +615,7 @@ BENCHMARK(BM_ArgumentFactory); void BM_StatusOrFactory(::testing::benchmark::State& state) { BenchmarkFactory factory; for (auto s : state) { - StatusOr result = factory.StatusOrFactory(); + absl::StatusOr result = factory.StatusOrFactory(); if (result.ok()) { result.value()->DoWork(); } @@ -622,7 +630,7 @@ void BM_ArgumentFactoryFail(::testing::benchmark::State& state) { BenchmarkFactory factory; for (auto s : state) { BenchmarkType* result = nullptr; - Status status = factory.ArgumentFactoryFail(&result); + absl::Status status = factory.ArgumentFactoryFail(&result); if (status.ok() && result != nullptr) { result->DoWork(); } @@ -635,7 +643,7 @@ BENCHMARK(BM_ArgumentFactoryFail); void BM_StatusOrFactoryFail(::testing::benchmark::State& state) { BenchmarkFactory factory; for (auto s : state) { - StatusOr result = factory.StatusOrFactoryFail(); + absl::StatusOr result = factory.StatusOrFactoryFail(); if (result.ok()) { result.value()->DoWork(); } @@ -650,7 +658,7 @@ void BM_ArgumentFactoryFailShortMsg(::testing::benchmark::State& state) { BenchmarkFactory factory; for (auto s : state) { BenchmarkType* result = nullptr; - Status status = factory.ArgumentFactoryFailShortMsg(&result); + absl::Status status = factory.ArgumentFactoryFailShortMsg(&result); if (status.ok() && result != nullptr) { result->DoWork(); } @@ -663,7 +671,8 @@ BENCHMARK(BM_ArgumentFactoryFailShortMsg); void BM_StatusOrFactoryFailShortMsg(::testing::benchmark::State& state) { BenchmarkFactory factory; for (auto s : state) { - StatusOr result = factory.StatusOrFactoryFailShortMsg(); + absl::StatusOr result = + factory.StatusOrFactoryFailShortMsg(); if (result.ok()) { result.value()->DoWork(); } @@ -678,7 +687,7 @@ void BM_ArgumentFactoryFailLongMsg(::testing::benchmark::State& state) { BenchmarkFactory factory; for (auto s : state) { BenchmarkType* result = nullptr; - Status status = factory.ArgumentFactoryFailLongMsg(&result); + absl::Status status = factory.ArgumentFactoryFailLongMsg(&result); if (status.ok() && result != nullptr) { result->DoWork(); } @@ -691,7 +700,8 @@ BENCHMARK(BM_ArgumentFactoryFailLongMsg); void BM_StatusOrFactoryFailLongMsg(::testing::benchmark::State& state) { BenchmarkFactory factory; for (auto s : state) { - StatusOr result = factory.StatusOrFactoryFailLongMsg(); + absl::StatusOr result = + factory.StatusOrFactoryFailLongMsg(); if (result.ok()) { result.value()->DoWork(); } @@ -701,22 +711,22 @@ BENCHMARK(BM_StatusOrFactoryFailLongMsg); #if defined(PLATFORM_GOOGLE) 
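// The benchmark helpers above contrast two factory shapes, both now spelled
// with absl types: an out-parameter factory returning absl::Status, and a
// value factory returning absl::StatusOr<T>. A self-contained sketch of the
// two call sites (Widget and the Make* helpers are hypothetical examples,
// not part of this change):
struct Widget {
  void DoWork() {}
};

absl::Status MakeWidgetInto(Widget** result) {  // out-parameter style
  *result = new Widget();
  return absl::OkStatus();
}

absl::StatusOr<Widget*> MakeWidget() {  // StatusOr style
  return new Widget();
}

void FactoryCallSiteSketch() {
  Widget* raw = nullptr;
  absl::Status status = MakeWidgetInto(&raw);
  if (status.ok() && raw != nullptr) {
    raw->DoWork();
  }
  delete raw;

  absl::StatusOr<Widget*> made = MakeWidget();
  if (made.ok()) {
    (*made)->DoWork();
    delete *made;
  }
}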
-StatusOr GetError() { +absl::StatusOr GetError() { return absl::InvalidArgumentError("An invalid argument error"); } -StatusOr PropagateError() { +absl::StatusOr PropagateError() { TF_ASSIGN_OR_RETURN(int a, GetError()); return a; } -StatusOr PropagateError2() { +absl::StatusOr PropagateError2() { TF_ASSIGN_OR_RETURN(int a, PropagateError()); return a; } TEST(Status, StackTracePropagation) { - StatusOr s = PropagateError2(); + absl::StatusOr s = PropagateError2(); auto sources = s.status().GetSourceLocations(); ASSERT_EQ(sources.size(), 3); diff --git a/third_party/xla/third_party/tsl/tsl/platform/test.h b/third_party/xla/third_party/tsl/tsl/platform/test.h index 313bfe5f0ea3dd..c8950099cad936 100644 --- a/third_party/xla/third_party/tsl/tsl/platform/test.h +++ b/third_party/xla/third_party/tsl/tsl/platform/test.h @@ -47,12 +47,6 @@ limitations under the License. #endif #include // IWYU pragma: export -#define DISABLED_ON_GPU_ROCM(X) X -#if TENSORFLOW_USE_ROCM -#undef DISABLED_ON_GPU_ROCM -#define DISABLED_ON_GPU_ROCM(X) DISABLED_##X -#endif // TENSORFLOW_USE_ROCM - namespace tsl { namespace testing { diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/BUILD b/third_party/xla/third_party/tsl/tsl/profiler/lib/BUILD index b6b43562847e0e..85ff01b6a987ce 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/BUILD +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/BUILD @@ -64,6 +64,7 @@ cc_library( "//tsl/platform:logging", "//tsl/platform:status", "//tsl/profiler/protobuf:xplane_proto_cc", + "@com_google_absl//absl/status", ], ) @@ -112,12 +113,12 @@ tsl_cc_test( ":profiler_factory_impl", ":profiler_interface", "//tsl/platform:macros", - "//tsl/platform:status", "//tsl/platform:test", "//tsl/platform:test_main", "//tsl/profiler/protobuf:profiler_options_proto_cc", "//tsl/profiler/protobuf:xplane_proto_cc", "@com_google_absl//absl/memory", + "@com_google_absl//absl/status", ], ) @@ -149,6 +150,7 @@ cc_library( "//tsl/platform:errors", "//tsl/platform:macros", "//tsl/platform:statusor", + "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/strings:string_view", "@local_xla//xla/tsl/util:env_var", ], @@ -159,9 +161,9 @@ tsl_cc_test( srcs = ["profiler_lock_test.cc"], deps = [ ":profiler_lock", - "//tsl/platform:statusor", "//tsl/platform:test", "//tsl/platform:test_main", + "@com_google_absl//absl/status:statusor", ], ) @@ -178,6 +180,7 @@ cc_library( "//tsl/platform:types", "//tsl/profiler/protobuf:profiler_options_proto_cc", "//tsl/profiler/protobuf:xplane_proto_cc", + "@com_google_absl//absl/status", ] + if_not_android([ ":profiler_interface", ":profiler_lock", @@ -206,6 +209,7 @@ cc_library( "//tsl/profiler/protobuf:profiler_options_proto_cc", "//tsl/profiler/protobuf:xplane_proto_cc", "@com_google_absl//absl/memory", + "@com_google_absl//absl/status", ] + if_not_android([ ":profiler_collection", ":profiler_factory", @@ -354,6 +358,7 @@ cc_library( ":profiler_interface", "//tsl/platform:status", "//tsl/profiler/protobuf:xplane_proto_cc", + "@com_google_absl//absl/status", ], ) diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_collection.cc b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_collection.cc index 762a23cfe1b3e0..f3ffec62bff7f7 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_collection.cc +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_collection.cc @@ -18,7 +18,7 @@ limitations under the License. 
#include #include -#include "tsl/platform/status.h" +#include "absl/status/status.h" #include "tsl/profiler/lib/profiler_interface.h" #include "tsl/profiler/protobuf/xplane.pb.h" diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_collection.h b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_collection.h index a73e1725d292f5..c3bede9af47c8d 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_collection.h +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_collection.h @@ -18,6 +18,7 @@ limitations under the License. #include #include +#include "absl/status/status.h" #include "tsl/platform/status.h" #include "tsl/profiler/lib/profiler_interface.h" #include "tsl/profiler/protobuf/xplane.pb.h" diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_controller.cc b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_controller.cc index 4820b97f03b84b..55fc42706dfea5 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_controller.cc +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_controller.cc @@ -19,7 +19,6 @@ limitations under the License. #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" -#include "tsl/platform/status.h" #include "tsl/profiler/lib/profiler_interface.h" #include "tsl/profiler/protobuf/xplane.pb.h" diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_controller.h b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_controller.h index d797960c6d3b26..ed88f8ec26b561 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_controller.h +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_controller.h @@ -17,6 +17,7 @@ limitations under the License. #include +#include "absl/status/status.h" #include "tsl/platform/status.h" #include "tsl/profiler/lib/profiler_interface.h" #include "tsl/profiler/protobuf/xplane.pb.h" diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory_test.cc b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory_test.cc index 94fa5c6868f4b2..a1188b9fa5563d 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory_test.cc +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_factory_test.cc @@ -18,8 +18,8 @@ limitations under the License. #include #include "absl/memory/memory.h" +#include "absl/status/status.h" #include "tsl/platform/macros.h" -#include "tsl/platform/status.h" #include "tsl/platform/test.h" #include "tsl/profiler/lib/profiler_interface.h" #include "tsl/profiler/protobuf/profiler_options.pb.h" diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock.cc b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock.cc index 08ce17a8bebd03..d32ea96fd2bf69 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock.cc +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock.cc @@ -16,10 +16,10 @@ limitations under the License. 
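// The BUILD and source hunks in this stretch of the diff move profiler targets
// off the tsl status wrappers and onto the absl types directly: each file that
// now spells absl::Status / absl::StatusOr picks up the matching absl header
// and Bazel dependency. Sketch of the pairing, using only labels that appear
// elsewhere in this diff (exact deps vary per target):
//
//   #include "absl/status/status.h"     // dep: @com_google_absl//absl/status
//   #include "absl/status/statusor.h"   // dep: @com_google_absl//absl/status:statusor
//
// replacing the indirection through:
//
//   #include "tsl/platform/status.h"    // dep: //tsl/platform:status
//   #include "tsl/platform/statusor.h"  // dep: //tsl/platform:statusor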
#include +#include "absl/status/statusor.h" #include "xla/tsl/util/env_var.h" #include "tsl/platform/errors.h" #include "tsl/platform/macros.h" -#include "tsl/platform/statusor.h" namespace tsl { namespace profiler { diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock.h b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock.h index c4e8b40303f4e1..478dae87b8a399 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock.h +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock.h @@ -17,6 +17,7 @@ limitations under the License. #include +#include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "tsl/platform/statusor.h" diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock_test.cc b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock_test.cc index 4a1d75eb6b1b89..2ddc56fb0b9a8d 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock_test.cc +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_lock_test.cc @@ -16,7 +16,7 @@ limitations under the License. #include -#include "tsl/platform/statusor.h" +#include "absl/status/statusor.h" #include "tsl/platform/test.h" namespace tsl { diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_session.cc b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_session.cc index c54f7969b4154f..f48b5ecb6f3f2c 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_session.cc +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_session.cc @@ -19,10 +19,10 @@ limitations under the License. #include #include "absl/memory/memory.h" +#include "absl/status/status.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/mutex.h" -#include "tsl/platform/status.h" #include "tsl/profiler/protobuf/profiler_options.pb.h" #include "tsl/profiler/protobuf/xplane.pb.h" diff --git a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_session.h b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_session.h index 7a04f42c188a4e..b503f428ff30d5 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_session.h +++ b/third_party/xla/third_party/tsl/tsl/profiler/lib/profiler_session.h @@ -19,6 +19,7 @@ limitations under the License. 
#include #include +#include "absl/status/status.h" #include "tsl/platform/mutex.h" #include "tsl/platform/platform.h" #include "tsl/platform/status.h" diff --git a/third_party/xla/third_party/tsl/tsl/profiler/utils/BUILD b/third_party/xla/third_party/tsl/tsl/profiler/utils/BUILD index f0145697048df9..26358c8ec80db5 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/utils/BUILD +++ b/third_party/xla/third_party/tsl/tsl/profiler/utils/BUILD @@ -446,6 +446,7 @@ tsl_cc_test( "//tsl/platform:test", "//tsl/platform:test_main", "//tsl/profiler/lib:connected_traceme", + "//tsl/profiler/protobuf:xplane_proto_cc", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/hash", ], diff --git a/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane.cc b/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane.cc index 7e8e7f3a431c97..3d06a05609a118 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane.cc +++ b/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane.cc @@ -38,16 +38,16 @@ using ::tsl::profiler::XPlane; using ::tsl::profiler::XPlaneBuilder; using ::tsl::profiler::XSpace; -void MutateXPlane(XPlane* plane, +void MutateXPlane(XPlane& plane, const std::vector>& mutator_factories) { - XPlaneBuilder plane_builder(plane); + XPlaneBuilder plane_builder(&plane); absl::flat_hash_map>> mutators_from_event_metadata_id; std::vector> line_mutators; for (const auto& mutator_factory : mutator_factories) { - auto mutators = mutator_factory->CreateMutators(&plane_builder); + auto mutators = mutator_factory->CreateMutators(plane_builder); for (auto& mutator : mutators) { if (mutator->event_metadata()) { auto id = mutator->event_metadata()->id(); @@ -63,7 +63,7 @@ void MutateXPlane(XPlane* plane, plane_builder.ForEachLine([&](XLineBuilder line_builder) { for (const auto& mutator : line_mutators) { - mutator->MutateEventsInLine(&line_builder); + mutator->MutateEventsInLine(line_builder); } if (mutators_from_event_metadata_id.empty()) return; line_builder.ForEachEvent([&](XEventBuilder event_builder) { @@ -71,7 +71,7 @@ void MutateXPlane(XPlane* plane, mutators_from_event_metadata_id.find(event_builder.MetadataId()); if (event_mutators != mutators_from_event_metadata_id.end()) { for (const auto& mutator : event_mutators->second) { - mutator->Mutate(&event_builder); + mutator->Mutate(event_builder); } } }); @@ -150,14 +150,18 @@ CreateMutatorFactories() { } // namespace void PreprocessXPlane(XPlane* plane) { + if (plane == nullptr) return; + auto mutator_factories = CreateMutatorFactories(); - MutateXPlane(plane, mutator_factories); + MutateXPlane(*plane, mutator_factories); } void PreprocessXSpace(XSpace* space) { + if (space == nullptr) return; + auto mutator_factories = CreateMutatorFactories(); for (XPlane& plane : *space->mutable_planes()) { - MutateXPlane(&plane, mutator_factories); + MutateXPlane(plane, mutator_factories); } } diff --git a/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane.h b/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane.h index 2433cd825cc842..724abb30429968 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane.h +++ b/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane.h @@ -52,9 +52,9 @@ class XplaneEventMutator { virtual ~XplaneEventMutator() = default; // Mutate event by event specified by the event_metadata. 
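// This header hunk switches the mutator interface from builder pointers to
// references (XEventBuilder&, XLineBuilder&, XPlaneBuilder&). A hypothetical
// mutator written against the new interface, modeled on XplaneRootEventMutator
// below; TagEventMutator and tag_metadata_ are illustrative names only:
class TagEventMutator : public XplaneEventMutator {
 public:
  TagEventMutator(XEventMetadata* event_metadata, XStatMetadata& tag_metadata)
      : XplaneEventMutator(event_metadata), tag_metadata_(tag_metadata) {}

  void Mutate(XEventBuilder& event_builder) override {
    // Tag every event whose metadata matched this mutator.
    event_builder.SetOrAddStatValue(tag_metadata_, /*value=*/1);
  }

  void MutateEventsInLine(XLineBuilder& line) override {
    CHECK(false);  // This mutator only targets events matched by metadata.
  }

 private:
  XStatMetadata& tag_metadata_;
};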
- virtual void Mutate(XEventBuilder* builder) = 0; + virtual void Mutate(XEventBuilder& builder) = 0; // Mutate line by line if event_metadata() return nullptr. - virtual void MutateEventsInLine(XLineBuilder* line) = 0; + virtual void MutateEventsInLine(XLineBuilder& line) = 0; const XEventMetadata* event_metadata() const { return event_metadata_; } @@ -70,7 +70,7 @@ class XplaneEventMutatorFactory { virtual ~XplaneEventMutatorFactory() = default; virtual std::vector> CreateMutators( - XPlaneBuilder* xplane) const = 0; + XPlaneBuilder& xplane) const = 0; protected: XplaneEventMutatorFactory() = default; @@ -84,21 +84,20 @@ class XplaneRootEventMutatorFactory : public XplaneEventMutatorFactory { public: static std::unique_ptr CreateFactory( HostEventType event_type, int64_t root_level) { - std::unique_ptr base; - base.reset(new XplaneRootEventMutatorFactory(event_type, root_level)); - return base; + return absl::WrapUnique( + new XplaneRootEventMutatorFactory(event_type, root_level)); } std::vector> CreateMutators( - XPlaneBuilder* xplane) const override { + XPlaneBuilder& xplane) const override { std::vector> mutators; - XEventMetadata* event_metadata = - xplane->GetEventMetadata(GetHostEventTypeStr(event_type_)); - if (event_metadata == nullptr) return {}; - XStatMetadata* root_metadata = - xplane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kIsRoot)); - mutators.emplace_back(std::make_unique( - event_metadata, root_metadata, root_level_)); + if (auto* event_metadata = + xplane.GetEventMetadata(GetHostEventTypeStr(event_type_))) { + XStatMetadata* root_metadata = + xplane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kIsRoot)); + mutators.emplace_back(std::make_unique( + event_metadata, *root_metadata, root_level_)); + } return mutators; } @@ -110,20 +109,20 @@ class XplaneRootEventMutatorFactory : public XplaneEventMutatorFactory { class XplaneRootEventMutator : public XplaneEventMutator { public: XplaneRootEventMutator(XEventMetadata* event_metadata, - XStatMetadata* root_stats_metadata, + XStatMetadata& root_stats_metadata, int64_t root_level) : XplaneEventMutator(event_metadata), root_stats_metadata_(root_stats_metadata), root_level_(root_level) {} - void Mutate(XEventBuilder* event_builder) override { - event_builder->SetOrAddStatValue(*root_stats_metadata_, root_level_); + void Mutate(XEventBuilder& event_builder) override { + event_builder.SetOrAddStatValue(root_stats_metadata_, root_level_); } - void MutateEventsInLine(XLineBuilder* line) override { + void MutateEventsInLine(XLineBuilder& line) override { CHECK(false); // Crash OK } private: - XStatMetadata* root_stats_metadata_; + XStatMetadata& root_stats_metadata_; int64_t root_level_; }; @@ -136,18 +135,19 @@ class XContextStatsAccessor { public: using value_type = StatValueType; - bool Initialize(XPlaneBuilder* xplane) { - stats_metadata_ = xplane->GetStatMetadata(GetStatTypeStr(kStatId)); + bool Initialize(XPlaneBuilder& xplane) { + stats_metadata_ = xplane.GetStatMetadata(GetStatTypeStr(kStatId)); return stats_metadata_; } - std::optional GetStat(XEventBuilder* event_builder) { - auto* stat = event_builder->GetStat(*stats_metadata_); + std::optional GetStat(XEventBuilder& event_builder) { + if (stats_metadata_ == nullptr) return std::nullopt; + auto* stat = event_builder.GetStat(*stats_metadata_); if (stat == nullptr) return std::nullopt; if constexpr (std::is_integral_v) { - return event_builder->IntOrUintValue(*stat); + return event_builder.IntOrUintValue(*stat); } else { - return 
event_builder->StrOrRefValue(*stat); + return event_builder.StrOrRefValue(*stat); } } @@ -160,19 +160,19 @@ class XContextStatsAccessorWithDefault { public: using value_type = StatValueType; - bool Initialize(XPlaneBuilder* xplane) { - stats_metadata_ = xplane->GetStatMetadata(GetStatTypeStr(kStatId)); + bool Initialize(XPlaneBuilder& xplane) { + stats_metadata_ = xplane.GetStatMetadata(GetStatTypeStr(kStatId)); return true; // Always return true, even stat_metadata doesn't exist. } - std::optional GetStat(XEventBuilder* event_builder) { + std::optional GetStat(XEventBuilder& event_builder) { if (stats_metadata_ == nullptr) return kDefaultValue; - auto* stat = event_builder->GetStat(*stats_metadata_); + auto* stat = event_builder.GetStat(*stats_metadata_); if (stat == nullptr) return kDefaultValue; if constexpr (std::is_integral_v) { - return event_builder->IntOrUintValue(*stat); + return event_builder.IntOrUintValue(*stat); } else { - return event_builder->StrOrRefValue(*stat); + return event_builder.StrOrRefValue(*stat); } } @@ -222,40 +222,40 @@ class XplaneConnectedEventMutatorFactory : public XplaneEventMutatorFactory { using StatsAccessors = std::tuple; std::vector> CreateMutators( - XPlaneBuilder* xplane) const override { + XPlaneBuilder& xplane) const override { // Check if all stats exist in current plane. StatsAccessors stats_accessors; bool all_required_stats_exist = true; auto check_stats_meta = [&all_required_stats_exist, - xplane](auto&& accessor) { - if (all_required_stats_exist == false) return; - if (!accessor.Initialize(xplane)) all_required_stats_exist = false; + &xplane](auto&& accessor) { + all_required_stats_exist = + all_required_stats_exist && accessor.Initialize(xplane); }; for_each(stats_accessors, check_stats_meta); if (!all_required_stats_exist) return {}; XEventMetadata* producer_event_metadata = - xplane->GetEventMetadata(GetHostEventTypeStr(producer_event)); + xplane.GetEventMetadata(GetHostEventTypeStr(producer_event)); XEventMetadata* consumer_event_metadata = - xplane->GetEventMetadata(GetHostEventTypeStr(consumer_event)); + xplane.GetEventMetadata(GetHostEventTypeStr(consumer_event)); std::vector> mutators; if (producer_event_metadata) { - XStatMetadata* context_type_metadata = xplane->GetOrCreateStatMetadata( + XStatMetadata* context_type_metadata = xplane.GetOrCreateStatMetadata( GetStatTypeStr(StatType::kProducerType)); - XStatMetadata* context_id_metadata = xplane->GetOrCreateStatMetadata( - GetStatTypeStr(StatType::kProducerId)); + XStatMetadata* context_id_metadata = + xplane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kProducerId)); mutators.emplace_back(std::make_unique( - producer_event_metadata, context_type_metadata, context_id_metadata, + producer_event_metadata, *context_type_metadata, *context_id_metadata, stats_accessors)); } if (consumer_event_metadata) { - XStatMetadata* context_type_metadata = xplane->GetOrCreateStatMetadata( + XStatMetadata* context_type_metadata = xplane.GetOrCreateStatMetadata( GetStatTypeStr(StatType::kConsumerType)); - XStatMetadata* context_id_metadata = xplane->GetOrCreateStatMetadata( - GetStatTypeStr(StatType::kConsumerId)); + XStatMetadata* context_id_metadata = + xplane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kConsumerId)); mutators.emplace_back(std::make_unique( - consumer_event_metadata, context_type_metadata, context_id_metadata, + consumer_event_metadata, *context_type_metadata, *context_id_metadata, stats_accessors)); } return mutators; @@ -267,19 +267,19 @@ class 
XplaneConnectedEventMutatorFactory : public XplaneEventMutatorFactory { class XplaneConnectedEventMutator : public XplaneEventMutator { public: XplaneConnectedEventMutator(XEventMetadata* event_metadata, - XStatMetadata* context_type_metadata, - XStatMetadata* context_id_metadata, + XStatMetadata& context_type_metadata, + XStatMetadata& context_id_metadata, const StatsAccessors& accessors) : XplaneEventMutator(event_metadata), context_type_metadata_(context_type_metadata), context_id_metadata_(context_id_metadata), accessors_(accessors) {} - void Mutate(XEventBuilder* event_builder) override { + void Mutate(XEventBuilder& event_builder) override { bool all_required_stats_exist = true; std::vector> required_stats; auto check_stats_meta = [&all_required_stats_exist, &required_stats, - event_builder](auto&& accessor) { + &event_builder](auto&& accessor) { if (all_required_stats_exist == false) return; auto stats_data = accessor.GetStat(event_builder); if (!stats_data) { @@ -299,18 +299,18 @@ class XplaneConnectedEventMutatorFactory : public XplaneEventMutatorFactory { context_id = absl::HashOf(producer_event, consumer_event, required_stats); } - event_builder->SetOrAddStatValue(*context_type_metadata_, - static_cast(context_type)); - event_builder->SetOrAddStatValue(*context_id_metadata_, context_id); + event_builder.SetOrAddStatValue(context_type_metadata_, + static_cast(context_type)); + event_builder.SetOrAddStatValue(context_id_metadata_, context_id); } - void MutateEventsInLine(XLineBuilder* line) override { + void MutateEventsInLine(XLineBuilder& line) override { CHECK(false); // Crash OK } private: - XStatMetadata* context_type_metadata_; - XStatMetadata* context_id_metadata_; + XStatMetadata& context_type_metadata_; + XStatMetadata& context_id_metadata_; StatsAccessors accessors_; }; }; @@ -323,17 +323,18 @@ class HostRunIdMutatorFactory : public XplaneEventMutatorFactory { } std::vector> CreateMutators( - XPlaneBuilder* xplane) const override { + XPlaneBuilder& xplane) const override { std::vector> mutators; - XEventMetadata* event_metadata = - xplane->GetEventMetadata(GetHostEventTypeStr(event_type)); - if (event_metadata == nullptr) return {}; - XContextStatsAccessor run_id_stats_accessor; - run_id_stats_accessor.Initialize(xplane); - XStatMetadata* run_id_metadata = - xplane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kRunId)); - mutators.emplace_back(std::make_unique( - event_metadata, run_id_stats_accessor, run_id_metadata)); + if (auto* event_metadata = + xplane.GetEventMetadata(GetHostEventTypeStr(event_type))) { + XContextStatsAccessor run_id_stats_accessor; + if (run_id_stats_accessor.Initialize(xplane)) { + XStatMetadata* run_id_metadata = + xplane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kRunId)); + mutators.emplace_back(std::make_unique( + event_metadata, run_id_stats_accessor, *run_id_metadata)); + } + } return mutators; } @@ -344,25 +345,25 @@ class HostRunIdMutatorFactory : public XplaneEventMutatorFactory { HostRunIdMutator( XEventMetadata* event_metadata, XContextStatsAccessor run_id_stats_accessor, - XStatMetadata* run_id_metadata) + XStatMetadata& run_id_metadata) : XplaneEventMutator(event_metadata), run_id_stats_accessor_(run_id_stats_accessor), run_id_metadata_(run_id_metadata) {} - void Mutate(XEventBuilder* event_builder) override { + void Mutate(XEventBuilder& event_builder) override { auto run_id = run_id_stats_accessor_.GetStat(event_builder); if (!run_id) return; int64_t fixed_run_id = ((uint64_t)run_id.value() & kRunIdMask); - 
event_builder->SetOrAddStatValue(*run_id_metadata_, fixed_run_id); + event_builder.SetOrAddStatValue(run_id_metadata_, fixed_run_id); } - void MutateEventsInLine(XLineBuilder* line) override { + void MutateEventsInLine(XLineBuilder& line) override { CHECK(false); // Crash OK } private: XContextStatsAccessor run_id_stats_accessor_; - XStatMetadata* run_id_metadata_; + XStatMetadata& run_id_metadata_; }; }; @@ -377,27 +378,28 @@ class TpuModuleLineMutatorFactory : public XplaneEventMutatorFactory { } std::vector> CreateMutators( - XPlaneBuilder* xplane) const override { + XPlaneBuilder& xplane) const override { std::vector> mutators; - if (absl::StartsWith(xplane->Name(), kTpuPlanePrefix) && - GetTensorCoreId(xplane->Name()).has_value()) { - if (auto device_ordinal = ParseDeviceOrdinal(xplane->Name())) { - XStatMetadata* context_type_metadata = xplane->GetOrCreateStatMetadata( + if (absl::StartsWith(xplane.Name(), kTpuPlanePrefix) && + GetTensorCoreId(xplane.Name()).has_value()) { + if (auto device_ordinal = ParseDeviceOrdinal(xplane.Name())) { + XStatMetadata* context_type_metadata = xplane.GetOrCreateStatMetadata( GetStatTypeStr(StatType::kConsumerType)); - XStatMetadata* context_id_metadata = xplane->GetOrCreateStatMetadata( + XStatMetadata* context_id_metadata = xplane.GetOrCreateStatMetadata( GetStatTypeStr(StatType::kConsumerId)); XContextStatsAccessor queue_id_stats_accessor; XContextStatsAccessor run_id_stats_accessor; XContextStatsAccessorWithDefault core_type_stats_accessor; - queue_id_stats_accessor.Initialize(xplane); - run_id_stats_accessor.Initialize(xplane); - core_type_stats_accessor.Initialize(xplane); - mutators.emplace_back(std::make_unique( - *device_ordinal, context_type_metadata, context_id_metadata, - queue_id_stats_accessor, run_id_stats_accessor, - core_type_stats_accessor)); + if (queue_id_stats_accessor.Initialize(xplane) && + run_id_stats_accessor.Initialize(xplane) && + core_type_stats_accessor.Initialize(xplane)) { + mutators.emplace_back(std::make_unique( + *device_ordinal, *context_type_metadata, *context_id_metadata, + queue_id_stats_accessor, run_id_stats_accessor, + core_type_stats_accessor)); + } } } return mutators; @@ -409,8 +411,8 @@ class TpuModuleLineMutatorFactory : public XplaneEventMutatorFactory { class TpuModuleLineMutator : public XplaneEventMutator { public: TpuModuleLineMutator( - uint32_t device_ordinal, XStatMetadata* context_type_metadata, - XStatMetadata* context_id_metadata, + uint32_t device_ordinal, XStatMetadata& context_type_metadata, + XStatMetadata& context_id_metadata, XContextStatsAccessor queue_id_stats_accessor, XContextStatsAccessor run_id_stats_accessor, @@ -424,16 +426,16 @@ class TpuModuleLineMutatorFactory : public XplaneEventMutatorFactory { run_id_stats_accessor_(run_id_stats_accessor), core_type_stats_accessor_(core_type_stats_accessor) {} - void Mutate(XEventBuilder* event_builder) override { + void Mutate(XEventBuilder& event_builder) override { CHECK(false); // Crash OK } - void MutateEventsInLine(XLineBuilder* line) override { - if (line->Name() != kXlaModuleLineName) return; - line->ForEachEvent([&](XEventBuilder event) { - auto run_id = run_id_stats_accessor_.GetStat(&event); - auto queue_id = queue_id_stats_accessor_.GetStat(&event); - auto core_type = core_type_stats_accessor_.GetStat(&event); + void MutateEventsInLine(XLineBuilder& line) override { + if (line.Name() != kXlaModuleLineName) return; + line.ForEachEvent([&](XEventBuilder event) { + auto run_id = run_id_stats_accessor_.GetStat(event); + auto queue_id 
= queue_id_stats_accessor_.GetStat(event); + auto core_type = core_type_stats_accessor_.GetStat(event); if (!run_id || !queue_id) return; // The order of tuple need to be // consistent with other kTpuLaunch types. @@ -444,16 +446,16 @@ class TpuModuleLineMutatorFactory : public XplaneEventMutatorFactory { required_stats.emplace_back(*run_id); required_stats.emplace_back(static_cast(*core_type)); int64_t context_id = absl::HashOf(required_stats); - event.SetOrAddStatValue(*context_type_metadata_, + event.SetOrAddStatValue(context_type_metadata_, static_cast(ContextType::kTpuLaunch)); - event.SetOrAddStatValue(*context_id_metadata_, context_id); + event.SetOrAddStatValue(context_id_metadata_, context_id); }); } private: uint64_t device_ordinal_; - XStatMetadata* context_type_metadata_; - XStatMetadata* context_id_metadata_; + XStatMetadata& context_type_metadata_; + XStatMetadata& context_id_metadata_; XContextStatsAccessor queue_id_stats_accessor_; XContextStatsAccessor run_id_stats_accessor_; @@ -473,7 +475,7 @@ class ThreadpoolLineMutatorFactory : public XplaneEventMutatorFactory { } std::vector> CreateMutators( - XPlaneBuilder* xplane) const override { + XPlaneBuilder& xplane) const override { std::vector> mutators; mutators.emplace_back(std::make_unique(xplane)); return mutators; @@ -484,25 +486,25 @@ class ThreadpoolLineMutatorFactory : public XplaneEventMutatorFactory { class ThreadpoolLineMutator : public XplaneEventMutator { public: - explicit ThreadpoolLineMutator(XPlaneBuilder* xplane) + explicit ThreadpoolLineMutator(XPlaneBuilder& xplane) : XplaneEventMutator(nullptr), xplane_(xplane) { start_region_metadata_ = - xplane_->GetEventMetadata(kThreadpoolListenerStartRegion); + xplane_.GetEventMetadata(kThreadpoolListenerStartRegion); stop_region_metadata_ = - xplane_->GetEventMetadata(kThreadpoolListenerStopRegion); + xplane_.GetEventMetadata(kThreadpoolListenerStopRegion); thread_pool_metadata_ = - xplane_->GetOrCreateEventMetadata(kThreadpoolListenerRegion); - consumer_ = xplane_->GetOrCreateStatMetadata( + xplane_.GetOrCreateEventMetadata(kThreadpoolListenerRegion); + consumer_ = xplane_.GetOrCreateStatMetadata( GetStatTypeStr(StatType::kConsumerId)); - consumer_type_ = xplane_->GetOrCreateStatMetadata( + consumer_type_ = xplane_.GetOrCreateStatMetadata( GetStatTypeStr(StatType::kConsumerType)); } - void Mutate(XEventBuilder* event_builder) override { + void Mutate(XEventBuilder& event_builder) override { CHECK(false); // Crash OK } - void MutateEventsInLine(XLineBuilder* line) override { + void MutateEventsInLine(XLineBuilder& line) override { if (start_region_metadata_ == nullptr || stop_region_metadata_ == nullptr) { // Skip mutations for xplanes that do not have region markers. 
These @@ -519,7 +521,7 @@ class ThreadpoolLineMutatorFactory : public XplaneEventMutatorFactory { }; std::vector event_metadata; - line->ForEachEvent([&](const XEventBuilder& event) { + line.ForEachEvent([&](const XEventBuilder& event) { if (event.MetadataId() == start_region_metadata_->id()) { auto consumer_id = event.GetStat(*consumer_); if (!consumer_id) return; @@ -535,7 +537,7 @@ class ThreadpoolLineMutatorFactory : public XplaneEventMutatorFactory { } }); for (const auto& event_metadata : event_metadata) { - XEventBuilder region = line->AddEvent(*thread_pool_metadata_); + XEventBuilder region = line.AddEvent(*thread_pool_metadata_); region.SetTimestampPs(event_metadata.start_region_timestamp_ps); region.SetEndTimestampPs(event_metadata.end_region_timestamp_ps); region.SetOrAddStatValue(*consumer_, event_metadata.region_id); @@ -548,7 +550,7 @@ class ThreadpoolLineMutatorFactory : public XplaneEventMutatorFactory { private: XStatMetadata* consumer_; XStatMetadata* consumer_type_; - XPlaneBuilder* xplane_; + XPlaneBuilder& xplane_; XEventMetadata* start_region_metadata_; XEventMetadata* stop_region_metadata_; XEventMetadata* thread_pool_metadata_; diff --git a/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane_test.cc b/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane_test.cc index 5912c00c397853..9712893645090c 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane_test.cc +++ b/third_party/xla/third_party/tsl/tsl/profiler/utils/preprocess_xplane_test.cc @@ -16,12 +16,14 @@ limitations under the License. #include "tsl/profiler/utils/preprocess_xplane.h" #include +#include #include #include "absl/container/flat_hash_map.h" #include "absl/hash/hash.h" #include "tsl/platform/test.h" #include "tsl/profiler/lib/connected_traceme.h" +#include "tsl/profiler/protobuf/xplane.pb.h" #include "tsl/profiler/utils/tf_xplane_visitor.h" #include "tsl/profiler/utils/xplane_builder.h" #include "tsl/profiler/utils/xplane_schema.h" @@ -290,6 +292,19 @@ TEST(PreprocessXPlane, ThreadPoolPreprocessorTest) { EXPECT_TRUE(new_event_added); } +TEST(PreprocessXPlane, XContextStatsAccessorNPETest) { + auto xplane = std::make_unique(); + XPlaneBuilder xplane_builder(xplane.get()); + XLine xline; + XLineBuilder xline_builder(&xline, &xplane_builder); + XEvent xevent; + XEventBuilder xevent_builder(&xline, &xplane_builder, &xevent); + XContextStatsAccessor run_id_accessor; + + ASSERT_FALSE(run_id_accessor.Initialize(xplane_builder)); + EXPECT_EQ(run_id_accessor.GetStat(xevent_builder), std::nullopt); +} + } // namespace } // namespace profiler } // namespace tsl diff --git a/third_party/xla/third_party/tsl/tsl/profiler/utils/xplane_utils.cc b/third_party/xla/third_party/tsl/tsl/profiler/utils/xplane_utils.cc index 88c7e30b76eee5..3e50fee7f7ce4a 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/utils/xplane_utils.cc +++ b/third_party/xla/third_party/tsl/tsl/profiler/utils/xplane_utils.cc @@ -224,6 +224,16 @@ const XLine* FindLineWithId(const XPlane& plane, int64_t id) { Find(plane.lines(), [id](const XLine* line) { return line->id() == id; }); return (i != -1) ? 
&plane.lines(i) : nullptr; } +std::vector FindLinesWithId(const XPlane& plane, int64_t id) { + std::vector indices = FindAll( + plane.lines(), [id](const XLine* line) { return line->id() == id; }); + std::vector lines; + lines.reserve(indices.size()); + for (int index : indices) { + lines.push_back(&plane.lines(index)); + } + return lines; +} const XLine* FindLineWithName(const XPlane& plane, absl::string_view name) { int i = Find(plane.lines(), diff --git a/third_party/xla/third_party/tsl/tsl/profiler/utils/xplane_utils.h b/third_party/xla/third_party/tsl/tsl/profiler/utils/xplane_utils.h index d0177632263455..8ea1429c1d90d2 100644 --- a/third_party/xla/third_party/tsl/tsl/profiler/utils/xplane_utils.h +++ b/third_party/xla/third_party/tsl/tsl/profiler/utils/xplane_utils.h @@ -83,6 +83,7 @@ std::vector FindMutablePlanesWithPrefix(XSpace* space, // Returns the plane with the given id/name or nullptr if not found. const XLine* FindLineWithId(const XPlane& plane, int64_t id); +std::vector FindLinesWithId(const XPlane& plane, int64_t id); const XLine* FindLineWithName(const XPlane& plane, absl::string_view name); XStat* FindOrAddMutableStat(const XStatMetadata& stat_metadata, XEvent* event); diff --git a/third_party/xla/third_party/tsl/workspace2.bzl b/third_party/xla/third_party/tsl/workspace2.bzl index 001ce018d87066..67011d3d2bb3ea 100644 --- a/third_party/xla/third_party/tsl/workspace2.bzl +++ b/third_party/xla/third_party/tsl/workspace2.bzl @@ -505,12 +505,6 @@ def _tf_repositories(): urls = tf_mirror_urls("https://github.com/google/double-conversion/archive/v3.2.0.tar.gz"), ) - tf_http_archive( - name = "rules_python", - sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161", - urls = tf_mirror_urls("https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz"), - ) - tf_http_archive( name = "build_bazel_rules_android", sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806", @@ -597,6 +591,7 @@ def _tf_repositories(): urls = tf_mirror_urls("https://github.com/google/glog/archive/refs/tags/v0.4.0.tar.gz"), ) +# buildifier: disable=unnamed-macro def workspace(): # Check the bazel version before executing any repository rules, in case # those rules rely on the version we require here. diff --git a/third_party/xla/xla/BUILD b/third_party/xla/xla/BUILD index b22f7021257508..304ed5a4d2b031 100644 --- a/third_party/xla/xla/BUILD +++ b/third_party/xla/xla/BUILD @@ -224,7 +224,10 @@ cc_library( "frontend_attributes.h", ], visibility = internal_visibility([":friends"]), - deps = ["//xla/hlo/ir:hlo"], + deps = [ + ":xla_data_proto_cc", + "//xla/hlo/ir:hlo", + ], ) cc_library( @@ -341,6 +344,7 @@ cc_library( hdrs = [ "iterator_util.h", "map_util.h", + "maybe_owning.h", "overflow_util.h", "util.h", ], diff --git a/third_party/xla/xla/array.h b/third_party/xla/xla/array.h index 276b3b556c4a76..fe33a49335ac26 100644 --- a/third_party/xla/xla/array.h +++ b/third_party/xla/xla/array.h @@ -314,32 +314,32 @@ class Array { // Invokes a callback with the (indices, value_ptr) for each cell in the // array. If a callback returns a non-OK status, returns that else returns - // OkStatus(). - Status EachStatus( - absl::FunctionRef, T*)> f) { + // absl::OkStatus(). 
+ absl::Status EachStatus( + absl::FunctionRef, T*)> f) { OwnedBuffer index(sizes_.size, default_init_t{}); for (int64_t i = 0; i < num_elements(); ++i, next_index(&index)) { - Status s = f(index.span(), &values_[i]); + absl::Status s = f(index.span(), &values_[i]); if (!s.ok()) { return s; } } - return OkStatus(); + return absl::OkStatus(); } // Invokes a callback with the (indices, value) for each cell in the array. // If a callback returns a non-OK status, returns that else returns - // OkStatus(). - Status EachStatus( - absl::FunctionRef, T)> f) const { + // absl::OkStatus(). + absl::Status EachStatus( + absl::FunctionRef, T)> f) const { OwnedBuffer index(sizes_.size, default_init_t{}); for (int64_t i = 0; i < num_elements(); ++i, next_index(&index)) { - Status s = f(index.span(), values_[i]); + absl::Status s = f(index.span(), values_[i]); if (!s.ok()) { return s; } } - return OkStatus(); + return absl::OkStatus(); } // Returns the value at the cell specified by the indexes. The number of diff --git a/third_party/xla/xla/backends/interpreter/compiler.cc b/third_party/xla/xla/backends/interpreter/compiler.cc index e48f63f77929ae..2409d9602671e0 100644 --- a/third_party/xla/xla/backends/interpreter/compiler.cc +++ b/third_party/xla/xla/backends/interpreter/compiler.cc @@ -89,7 +89,7 @@ absl::StatusOr HandleEvaluatorCustomCall( } // namespace -Status InterpreterCompiler::RunHloOptimization(HloModule* hlo_module) { +absl::Status InterpreterCompiler::RunHloOptimization(HloModule* hlo_module) { HloPassPipeline pipeline("Interpreter"); // The TopkDecomposer generates a compare op with type=TOTALORDER and must diff --git a/third_party/xla/xla/backends/interpreter/compiler.h b/third_party/xla/xla/backends/interpreter/compiler.h index cfdbaa4fd23928..1f234f161befb1 100644 --- a/third_party/xla/xla/backends/interpreter/compiler.h +++ b/third_party/xla/xla/backends/interpreter/compiler.h @@ -62,7 +62,7 @@ class InterpreterCompiler : public Compiler { se::Platform::Id PlatformId() const override; private: - Status RunHloOptimization(HloModule* hlo_module); + absl::Status RunHloOptimization(HloModule* hlo_module); InterpreterCompiler(const InterpreterCompiler&) = delete; InterpreterCompiler& operator=(const InterpreterCompiler&) = delete; diff --git a/third_party/xla/xla/backends/interpreter/executable_base.cc b/third_party/xla/xla/backends/interpreter/executable_base.cc index 0bb1639daa58a0..a58c811c87e257 100644 --- a/third_party/xla/xla/backends/interpreter/executable_base.cc +++ b/third_party/xla/xla/backends/interpreter/executable_base.cc @@ -171,7 +171,7 @@ InterpreterExecutableBase::AllocateOutputMemoryWithInputReuse( alias->ToString()); } } - return OkStatus(); + return absl::OkStatus(); })); se::StreamExecutor* executor = stream->parent(); diff --git a/third_party/xla/xla/backends/interpreter/executor.h b/third_party/xla/xla/backends/interpreter/executor.h index 909d7fbb1bc513..b77ca8d120ec7c 100644 --- a/third_party/xla/xla/backends/interpreter/executor.h +++ b/third_party/xla/xla/backends/interpreter/executor.h @@ -114,8 +114,6 @@ class XlaInterpreterExecutor : public StreamExecutor { bool HostCallback(Stream *stream, absl::AnyInvocable callback) override; - absl::Status AllocateEvent(Event *event) override { return absl::OkStatus(); } - absl::Status DeallocateEvent(Event *event) override { return absl::OkStatus(); } @@ -132,7 +130,6 @@ class XlaInterpreterExecutor : public StreamExecutor { return Event::Status::kError; } - bool AllocateStream(Stream *stream) override { return true; } 
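The Array::EachStatus hunk above keeps the existing contract while spelling the types out as absl::Status: the callback runs once per element and the first non-OK status aborts the traversal. A minimal sketch of that early-exit pattern follows; it walks a plain std::vector rather than xla::Array, an assumption made purely to keep the example self-contained.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    #include "absl/functional/function_ref.h"
    #include "absl/status/status.h"
    #include "absl/strings/str_cat.h"

    // Walks every element and stops at the first callback error, mirroring the
    // EachStatus contract: return the failing status, else absl::OkStatus().
    absl::Status EachStatus(const std::vector<int64_t>& values,
                            absl::FunctionRef<absl::Status(int64_t, int64_t)> f) {
      for (int64_t i = 0; i < static_cast<int64_t>(values.size()); ++i) {
        absl::Status s = f(i, values[i]);
        if (!s.ok()) return s;  // Early exit: remaining elements are not visited.
      }
      return absl::OkStatus();
    }

    int main() {
      std::vector<int64_t> values = {1, 2, -3, 4};
      absl::Status status =
          EachStatus(values, [](int64_t index, int64_t value) -> absl::Status {
            if (value < 0) {
              return absl::InvalidArgumentError(
                  absl::StrCat("negative element at index ", index));
            }
            return absl::OkStatus();
          });
      std::cout << status << "\n";  // Prints the InvalidArgument from index 2.
    }

Returning absl::OkStatus() only after the loop completes is what lets callers distinguish "visited everything" from "stopped early".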
void DeallocateStream(Stream *stream) override {} bool CreateStreamDependency(Stream *dependent, Stream *other) override; @@ -157,9 +154,8 @@ class XlaInterpreterExecutor : public StreamExecutor { bool CanEnablePeerAccessTo(StreamExecutorInterface *other) override { return true; } - - std::unique_ptr CreateEventImplementation() override { - return nullptr; + absl::StatusOr> CreateEvent() override { + return std::make_unique(this, nullptr); } absl::StatusOr> CreateStream( @@ -167,7 +163,6 @@ class XlaInterpreterExecutor : public StreamExecutor { std::nullopt) override { auto stream = std::make_unique(this, std::make_unique()); - TF_RETURN_IF_ERROR(stream->Initialize(priority)); return std::move(stream); } @@ -175,11 +170,6 @@ class XlaInterpreterExecutor : public StreamExecutor { // The device ordinal value that this executor was initialized with; recorded // for use in getting device metadata. Immutable post-initialization. int device_ordinal_; - - DeviceMemoryBase AllocateSingleOutput(const xla::Shape &shape); - - absl::StatusOr AllocateOutputBuffer( - const xla::Shape &shape); }; } // namespace interpreter diff --git a/third_party/xla/xla/backends/profiler/cpu/metadata_collector.cc b/third_party/xla/xla/backends/profiler/cpu/metadata_collector.cc index a19d4527ec930c..ebb73c2ca5fc7b 100644 --- a/third_party/xla/xla/backends/profiler/cpu/metadata_collector.cc +++ b/third_party/xla/xla/backends/profiler/cpu/metadata_collector.cc @@ -42,23 +42,23 @@ class MetadataCollector : public tsl::profiler::ProfilerInterface { public: MetadataCollector() = default; - Status Start() override { + absl::Status Start() override { if (!trace_active_) { xla::XlaDebugInfoManager::Get()->StartTracing(); trace_active_ = true; } - return OkStatus(); + return absl::OkStatus(); } - Status Stop() override { + absl::Status Stop() override { if (trace_active_) { xla::XlaDebugInfoManager::Get()->StopTracing(&debug_info_); trace_active_ = false; } - return OkStatus(); + return absl::OkStatus(); } - Status CollectData(tsl::profiler::XSpace* space) override { + absl::Status CollectData(tsl::profiler::XSpace* space) override { if (!debug_info_.empty()) { tsl::profiler::XPlane* plane = tsl::profiler::FindOrAddMutablePlaneWithName( @@ -70,7 +70,7 @@ class MetadataCollector : public tsl::profiler::ProfilerInterface { } debug_info_.clear(); } - return OkStatus(); + return absl::OkStatus(); } private: diff --git a/third_party/xla/xla/backends/profiler/gpu/BUILD b/third_party/xla/xla/backends/profiler/gpu/BUILD index ecf4b7647670e9..8f7429713fcd0c 100644 --- a/third_party/xla/xla/backends/profiler/gpu/BUILD +++ b/third_party/xla/xla/backends/profiler/gpu/BUILD @@ -222,23 +222,27 @@ tsl_gpu_library( visibility = ["//visibility:public"], deps = [ "//xla/stream_executor/rocm:roctracer_wrapper", + "//xla/tsl/util:env_var", "@com_google_absl//absl/container:fixed_array", "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/container:node_hash_map", "@com_google_absl//absl/container:node_hash_set", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/types:optional", - "@local_tsl//tsl/platform:env", + "@local_tsl//tsl/platform:abi", + "@local_tsl//tsl/platform:env_time", "@local_tsl//tsl/platform:errors", - "@local_tsl//tsl/platform:logging", "@local_tsl//tsl/platform:macros", - "@local_tsl//tsl/platform:platform_port", + "@local_tsl//tsl/platform:mutex", + "@local_tsl//tsl/platform:status", + 
"@local_tsl//tsl/platform:thread_annotations", + "@local_tsl//tsl/platform:types", "@local_tsl//tsl/profiler/backends/cpu:annotation_stack", "@local_tsl//tsl/profiler/lib:profiler_factory", "@local_tsl//tsl/profiler/lib:profiler_interface", - "@local_tsl//tsl/profiler/protobuf:xplane_proto_cc", "@local_tsl//tsl/profiler/utils:parse_annotation", - "@local_tsl//tsl/profiler/utils:time_utils", - "@local_tsl//tsl/profiler/utils:trace_utils", "@local_tsl//tsl/profiler/utils:xplane_builder", "@local_tsl//tsl/profiler/utils:xplane_schema", "@local_tsl//tsl/profiler/utils:xplane_utils", @@ -256,16 +260,18 @@ tsl_gpu_library( "//xla/stream_executor/rocm:roctracer_wrapper", "@com_google_absl//absl/container:fixed_array", "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/container:node_hash_map", "@com_google_absl//absl/container:node_hash_set", - "@com_google_absl//absl/status", + "@com_google_absl//absl/types:optional", "@local_tsl//tsl/platform:env", "@local_tsl//tsl/platform:errors", "@local_tsl//tsl/platform:logging", "@local_tsl//tsl/platform:macros", "@local_tsl//tsl/platform:platform_port", + "@local_tsl//tsl/platform:status", + "@local_tsl//tsl/platform:types", "@local_tsl//tsl/profiler/backends/cpu:annotation_stack", - "@local_tsl//tsl/profiler/protobuf:xplane_proto_cc", "@local_tsl//tsl/profiler/utils:time_utils", ], ) diff --git a/third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events.cc b/third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events.cc index 8b0d6977eba605..4c3ce94e1651a4 100644 --- a/third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events.cc +++ b/third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events.cc @@ -26,7 +26,6 @@ namespace profiler { namespace { using absl::OkStatus; -using absl::Status; using absl::StatusCode; // CUPTI from CUDA 11.6 adds information about the hardware channel that ops @@ -365,11 +364,10 @@ void AddSynchronizationActivityEvent( collector.receive(std::move(event)); } -static Status ConvertActivityBuffer(CuptiEventCollectorDelegate &collector, - uint8_t *buffer, const size_t size, - const size_t max_activity_event_count, - size_t &total_activity_event_count, - size_t &dropped_activity_event_count) { +static absl::Status ConvertActivityBuffer( + CuptiEventCollectorDelegate &collector, uint8_t *buffer, const size_t size, + const size_t max_activity_event_count, size_t &total_activity_event_count, + size_t &dropped_activity_event_count) { CuptiInterface *cupti_interface = GetCuptiInterface(); CUpti_Activity *record = nullptr; while (true) { @@ -430,8 +428,8 @@ static Status ConvertActivityBuffer(CuptiEventCollectorDelegate &collector, break; } else { LOG(WARNING) << "CUPTI parse ACTIVITY buffer error: " << status; - return Status(StatusCode::kInternal, - "Parse cupti activity buffer error."); + return absl::Status(StatusCode::kInternal, + "Parse cupti activity buffer error."); } } VLOG(3) << "CUPTI tracer post-process one ACTIVITY buffer of size: " << size diff --git a/third_party/xla/xla/backends/profiler/gpu/cupti_tracer.cc b/third_party/xla/xla/backends/profiler/gpu/cupti_tracer.cc index 67e854ed9d3049..913171fc960d76 100644 --- a/third_party/xla/xla/backends/profiler/gpu/cupti_tracer.cc +++ b/third_party/xla/xla/backends/profiler/gpu/cupti_tracer.cc @@ -35,7 +35,6 @@ namespace profiler { namespace { using absl::OkStatus; -using absl::Status; using tsl::Env; using tsl::profiler::AnnotationStack; @@ -49,7 +48,7 @@ class CuptiApiTracingDisabler { 
~CuptiApiTracingDisabler() { internalCuCall--; } }; -Status ToStatus(CUptiResult result) { +absl::Status ToStatus(CUptiResult result) { if (result == CUPTI_SUCCESS) { return OkStatus(); } @@ -58,7 +57,7 @@ Status ToStatus(CUptiResult result) { return tsl::errors::Unavailable("CUPTI error: ", str ? str : ""); } -Status ToStatus(CUresult result) { +absl::Status ToStatus(CUresult result) { if (result == CUDA_SUCCESS) { return OkStatus(); } @@ -67,7 +66,7 @@ Status ToStatus(CUresult result) { return tsl::errors::Unavailable("CUDA error: ", str ? str : ""); } -inline void LogIfError(const Status &status) { +inline void LogIfError(const absl::Status &status) { if (status.ok()) return; LOG(ERROR) << status.message(); } @@ -302,8 +301,9 @@ void CUPTIAPI ProcessCuptiActivityBuffer(CUcontext context, uint32_t stream_id, << " size: " << size << " valid_size: " << valid_size; VLOG(3) << "Activity profile for stream " << stream_id; - Status status = CuptiTracer::GetCuptiTracerSingleton()->ProcessActivityBuffer( - context, stream_id, buffer, valid_size); + absl::Status status = + CuptiTracer::GetCuptiTracerSingleton()->ProcessActivityBuffer( + context, stream_id, buffer, valid_size); if (!status.ok()) { LOG(ERROR) << status; } @@ -657,17 +657,17 @@ class CuptiDriverApiHookWithActivityApi : public CuptiDriverApiHook { cupti_interface_(cupti_interface), collector_(collector) {} - Status OnDriverApiEnter(int device_id, CUpti_CallbackDomain domain, - CUpti_CallbackId cbid, - const CUpti_CallbackData *cbdata) override { + absl::Status OnDriverApiEnter(int device_id, CUpti_CallbackDomain domain, + CUpti_CallbackId cbid, + const CUpti_CallbackData *cbdata) override { // Stash away the current Cupti timestamp into cbdata. *cbdata->correlationData = option_.required_callback_api_events ? CuptiTracer::GetTimestamp() : 0; return OkStatus(); } - Status OnDriverApiExit(int device_id, CUpti_CallbackDomain domain, - CUpti_CallbackId cbid, - const CUpti_CallbackData *cbdata) override { + absl::Status OnDriverApiExit(int device_id, CUpti_CallbackDomain domain, + CUpti_CallbackId cbid, + const CUpti_CallbackData *cbdata) override { // If we are not collecting CPU events from Callback API, we can return now. 
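The ToStatus overloads above keep their shape under the absl::Status spelling: success maps to OkStatus() and any other driver result code becomes an Unavailable error carrying the driver's error string. A hedged sketch of that wrapper pattern is below; FakeResult, FakeGetErrorString, and RETURN_IF_FAKE_ERROR are illustrative stand-ins, not CUPTI or CUDA APIs.

    #include <iostream>

    #include "absl/status/status.h"
    #include "absl/strings/str_cat.h"

    // Illustrative stand-ins for a C driver API; CUPTI/CUDA use CUptiResult and
    // CUresult with their own error-string lookups instead.
    enum FakeResult { kFakeSuccess = 0, kFakeOutOfMemory = 2 };

    const char* FakeGetErrorString(FakeResult result) {
      return result == kFakeOutOfMemory ? "out of memory" : "unknown";
    }

    // Maps a C-style result code onto absl::Status, as the ToStatus() helpers do.
    absl::Status ToStatus(FakeResult result) {
      if (result == kFakeSuccess) {
        return absl::OkStatus();
      }
      return absl::UnavailableError(
          absl::StrCat("driver error: ", FakeGetErrorString(result)));
    }

    // Typical call site: bail out of the surrounding function on failure.
    #define RETURN_IF_FAKE_ERROR(expr)             \
      do {                                         \
        absl::Status _status = ToStatus(expr);     \
        if (!_status.ok()) return _status;         \
      } while (false)

    absl::Status DoWork() {
      RETURN_IF_FAKE_ERROR(kFakeSuccess);
      RETURN_IF_FAKE_ERROR(kFakeOutOfMemory);  // Returns Unavailable here.
      return absl::OkStatus();
    }

    int main() { std::cout << DoWork() << "\n"; }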
if (!option_.required_callback_api_events) { return OkStatus(); @@ -680,7 +680,7 @@ class CuptiDriverApiHookWithActivityApi : public CuptiDriverApiHook { return AddDriverApiCallbackEvent(collector_, cupti_interface_, device_id, start_tsc, end_tsc, domain, cbid, cbdata); } - Status SyncAndFlush() override { + absl::Status SyncAndFlush() override { if (option_.sync_devices_before_stop) { CuptiApiTracingDisabler disabler; absl::MutexLock lock(&mutex_); @@ -724,7 +724,7 @@ class CuptiDriverApiHookWithActivityApi : public CuptiDriverApiHook { } // namespace -/*static*/ Status CuptiDriverApiHook::AddDriverApiCallbackEvent( +/*static*/ absl::Status CuptiDriverApiHook::AddDriverApiCallbackEvent( CuptiTraceCollector *collector, CuptiInterface *cupti_interface, int device_id, uint64_t start_tsc, uint64_t end_tsc, CUpti_CallbackDomain domain, CUpti_CallbackId cbid, @@ -904,7 +904,7 @@ void CuptiTracer::Enable(const CuptiTracerOptions &option, cupti_driver_api_hook_.reset(new CuptiDriverApiHookWithActivityApi( option, cupti_interface_, collector)); - Status status = EnableApiTracing(); + absl::Status status = EnableApiTracing(); need_root_access_ |= status.code() == tsl::error::PERMISSION_DENIED; if (!status.ok()) return; @@ -936,7 +936,7 @@ void CuptiTracer::Disable() { tsl::profiler::AnnotationStack::Enable(false); } -Status CuptiTracer::EnableApiTracing() { +absl::Status CuptiTracer::EnableApiTracing() { if (api_tracing_enabled_) return OkStatus(); VLOG(1) << "Enable subscriber"; @@ -964,7 +964,7 @@ Status CuptiTracer::EnableApiTracing() { return OkStatus(); } -Status CuptiTracer::DisableApiTracing() { +absl::Status CuptiTracer::DisableApiTracing() { if (!api_tracing_enabled_) return OkStatus(); api_tracing_enabled_ = false; @@ -989,7 +989,7 @@ Status CuptiTracer::DisableApiTracing() { return OkStatus(); } -Status CuptiTracer::EnableActivityTracing() { +absl::Status CuptiTracer::EnableActivityTracing() { if (activity_tracing_enabled_) return OkStatus(); PrepareActivityStart(); if (!option_->activities_selected.empty()) { @@ -1018,7 +1018,7 @@ Status CuptiTracer::EnableActivityTracing() { return OkStatus(); } -Status CuptiTracer::DisableActivityTracing() { +absl::Status CuptiTracer::DisableActivityTracing() { if (activity_tracing_enabled_) { VLOG(1) << "Disabling activity tracing for " << option_->activities_selected.size() << " activities"; @@ -1040,7 +1040,7 @@ Status CuptiTracer::DisableActivityTracing() { return OkStatus(); } -Status CuptiTracer::Finalize() { +absl::Status CuptiTracer::Finalize() { if (option_->cupti_finalize) { VLOG(1) << "CuptiFinalize"; RETURN_IF_CUPTI_ERROR(cupti_interface_->Finalize()); @@ -1059,8 +1059,8 @@ Status CuptiTracer::Finalize() { return 0; } -Status CuptiTracer::HandleNVTXCallback(CUpti_CallbackId cbid, - const CUpti_CallbackData *cbdata) { +absl::Status CuptiTracer::HandleNVTXCallback(CUpti_CallbackId cbid, + const CUpti_CallbackData *cbdata) { const CUpti_NvtxData *pdata = reinterpret_cast(cbdata); if (cbid == CUPTI_CBID_NVTX_nvtxDomainRangePushEx) { @@ -1078,9 +1078,9 @@ Status CuptiTracer::HandleNVTXCallback(CUpti_CallbackId cbid, return OkStatus(); } -Status CuptiTracer::HandleCallback(CUpti_CallbackDomain domain, - CUpti_CallbackId cbid, - const CUpti_CallbackData *cbdata) { +absl::Status CuptiTracer::HandleCallback(CUpti_CallbackDomain domain, + CUpti_CallbackId cbid, + const CUpti_CallbackData *cbdata) { if (!api_tracing_enabled_) return OkStatus(); // already unsubscribed. if (!cupti_driver_api_hook_) return OkStatus(); // already unsubscribed. 
if (domain == CUPTI_CB_DOMAIN_NVTX) return HandleNVTXCallback(cbid, cbdata); @@ -1190,8 +1190,9 @@ static size_t CountCuptiActivityEvent(uint8_t *buffer, size_t size) { return total_event_count; } -Status CuptiTracer::ProcessActivityBuffer(CUcontext context, uint32_t stream_id, - uint8_t *buffer, size_t size) { +absl::Status CuptiTracer::ProcessActivityBuffer(CUcontext context, + uint32_t stream_id, + uint8_t *buffer, size_t size) { absl::Cleanup buffer_cleanup = [&]() { if (buffer) activity_buffers_->ReclaimBuffer(buffer); }; diff --git a/third_party/xla/xla/backends/profiler/gpu/device_tracer_cuda.cc b/third_party/xla/xla/backends/profiler/gpu/device_tracer_cuda.cc index d7bb2524b66762..741a4129044e74 100644 --- a/third_party/xla/xla/backends/profiler/gpu/device_tracer_cuda.cc +++ b/third_party/xla/xla/backends/profiler/gpu/device_tracer_cuda.cc @@ -40,7 +40,6 @@ namespace xla { namespace profiler { using absl::OkStatus; -using absl::Status; using tensorflow::ProfileOptions; using tensorflow::profiler::XSpace; using tsl::ReadBoolFromEnvVar; @@ -55,13 +54,13 @@ class GpuTracer : public tsl::profiler::ProfilerInterface { ~GpuTracer() override {} // GpuTracer interface: - Status Start() override; - Status Stop() override; - Status CollectData(XSpace* space) override; + absl::Status Start() override; + absl::Status Stop() override; + absl::Status CollectData(XSpace* space) override; private: - Status DoStart(); - Status DoStop(); + absl::Status DoStart(); + absl::Status DoStop(); enum State { kNotStarted, @@ -77,7 +76,7 @@ class GpuTracer : public tsl::profiler::ProfilerInterface { std::unique_ptr cupti_collector_; }; -Status GpuTracer::DoStart() { +absl::Status GpuTracer::DoStart() { if (!cupti_tracer_->IsAvailable()) { return tsl::errors::Unavailable("Another profile session running."); } @@ -159,8 +158,8 @@ Status GpuTracer::DoStart() { return OkStatus(); } -Status GpuTracer::Start() { - Status status = DoStart(); +absl::Status GpuTracer::Start() { + absl::Status status = DoStart(); if (status.ok()) { profiling_state_ = State::kStartedOk; return OkStatus(); @@ -170,20 +169,20 @@ Status GpuTracer::Start() { } } -Status GpuTracer::DoStop() { +absl::Status GpuTracer::DoStop() { cupti_tracer_->Disable(); return OkStatus(); } -Status GpuTracer::Stop() { +absl::Status GpuTracer::Stop() { if (profiling_state_ == State::kStartedOk) { - Status status = DoStop(); + absl::Status status = DoStop(); profiling_state_ = status.ok() ? 
State::kStoppedOk : State::kStoppedError; } return OkStatus(); } -Status GpuTracer::CollectData(XSpace* space) { +absl::Status GpuTracer::CollectData(XSpace* space) { VLOG(2) << "Collecting data to XSpace from GpuTracer."; switch (profiling_state_) { case State::kNotStarted: diff --git a/third_party/xla/xla/backends/profiler/gpu/device_tracer_rocm.cc b/third_party/xla/xla/backends/profiler/gpu/device_tracer_rocm.cc index 81eb2d192ea09a..560ae8033af66e 100644 --- a/third_party/xla/xla/backends/profiler/gpu/device_tracer_rocm.cc +++ b/third_party/xla/xla/backends/profiler/gpu/device_tracer_rocm.cc @@ -75,13 +75,13 @@ class GpuTracer : public profiler::ProfilerInterface { ~GpuTracer() override {} // GpuTracer interface: - Status Start() override; - Status Stop() override; - Status CollectData(XSpace* space) override; + absl::Status Start() override; + absl::Status Stop() override; + absl::Status CollectData(XSpace* space) override; private: - Status DoStart(); - Status DoStop(); + absl::Status DoStart(); + absl::Status DoStop(); RocmTracerOptions GetRocmTracerOptions(); @@ -184,7 +184,7 @@ RocmTraceCollectorOptions GpuTracer::GetRocmTraceCollectorOptions( return options; } -Status GpuTracer::DoStart() { +absl::Status GpuTracer::DoStart() { if (!rocm_tracer_->IsAvailable()) { return tsl::errors::Unavailable("Another profile session running."); } @@ -208,8 +208,8 @@ Status GpuTracer::DoStart() { return OkStatus(); } -Status GpuTracer::Start() { - Status status = DoStart(); +absl::Status GpuTracer::Start() { + absl::Status status = DoStart(); if (status.ok()) { profiling_state_ = State::kStartedOk; return OkStatus(); @@ -219,21 +219,21 @@ Status GpuTracer::Start() { } } -Status GpuTracer::DoStop() { +absl::Status GpuTracer::DoStop() { rocm_tracer_->Disable(); AnnotationStack::Enable(false); return OkStatus(); } -Status GpuTracer::Stop() { +absl::Status GpuTracer::Stop() { if (profiling_state_ == State::kStartedOk) { - Status status = DoStop(); + absl::Status status = DoStop(); profiling_state_ = status.ok() ? State::kStoppedOk : State::kStoppedError; } return OkStatus(); } -Status GpuTracer::CollectData(XSpace* space) { +absl::Status GpuTracer::CollectData(XSpace* space) { switch (profiling_state_) { case State::kNotStarted: VLOG(3) << "No trace data collected, session wasn't started"; diff --git a/third_party/xla/xla/backends/profiler/gpu/rocm_collector.cc b/third_party/xla/xla/backends/profiler/gpu/rocm_collector.cc index 66f8000f6d0d52..d18b0a5d513842 100644 --- a/third_party/xla/xla/backends/profiler/gpu/rocm_collector.cc +++ b/third_party/xla/xla/backends/profiler/gpu/rocm_collector.cc @@ -18,6 +18,7 @@ limitations under the License. 
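Both device tracers above (CUDA and ROCm) drive the same small state machine: Start() lands in kStartedOk or kStartedError, Stop() only does work from kStartedOk, and CollectData() decides from the final state whether anything should be exported. A compact sketch of that flow, with DoStart()/DoStop() stubbed to always succeed (an assumption for illustration only):

    #include <iostream>

    #include "absl/status/status.h"

    // Mirrors the profiler state machine used by the GPU tracers: the session is
    // driven by Start/Stop, and CollectData inspects the final state.
    class ToyTracer {
     public:
      absl::Status Start() {
        absl::Status status = DoStart();
        profiling_state_ = status.ok() ? State::kStartedOk : State::kStartedError;
        return status;
      }

      absl::Status Stop() {
        if (profiling_state_ == State::kStartedOk) {
          absl::Status status = DoStop();
          profiling_state_ =
              status.ok() ? State::kStoppedOk : State::kStoppedError;
        }
        return absl::OkStatus();
      }

      absl::Status CollectData() {
        switch (profiling_state_) {
          case State::kNotStarted:
            std::cout << "no trace data, session was never started\n";
            return absl::OkStatus();
          case State::kStartedError:
          case State::kStoppedError:
            return absl::InternalError("profiling session failed");
          case State::kStartedOk:
            return absl::FailedPreconditionError("stop the session first");
          case State::kStoppedOk:
            std::cout << "exporting collected events\n";
            return absl::OkStatus();
        }
        return absl::OkStatus();
      }

     private:
      enum class State {
        kNotStarted,
        kStartedOk,
        kStartedError,
        kStoppedOk,
        kStoppedError,
      };

      // Stand-ins for the real enable/disable calls; assumed to succeed here.
      absl::Status DoStart() { return absl::OkStatus(); }
      absl::Status DoStop() { return absl::OkStatus(); }

      State profiling_state_ = State::kNotStarted;
    };

    int main() {
      ToyTracer tracer;
      std::cout << tracer.Start() << "\n";
      std::cout << tracer.Stop() << "\n";
      std::cout << tracer.CollectData() << "\n";
    }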
#include "absl/container/fixed_array.h" #include "absl/container/flat_hash_set.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" @@ -223,8 +224,8 @@ class PerDeviceCollector { stats.occupancy_pct /= device_properties_.maxThreadsPerMultiProcessor; err = hipOccupancyMaxPotentialBlockSize( - &stats.min_grid_size, &stats.suggested_block_size, static_cast(params.func_ptr), - params.dynamic_smem_size, 0); + &stats.min_grid_size, &stats.suggested_block_size, + static_cast(params.func_ptr), params.dynamic_smem_size, 0); if (err != hipError_t::hipSuccess) { return {}; @@ -578,8 +579,7 @@ class RocmTraceCollectorImpl : public profiler::RocmTraceCollector { num_activity_events_(0), start_walltime_ns_(start_walltime_ns), start_gputime_ns_(start_gputime_ns), - num_gpus_(options.num_gpus), - per_device_collector_(options.num_gpus) {} + num_gpus_(options.num_gpus) {} void AddEvent(RocmTracerEvent&& event, bool is_auxiliary) override; void Flush() override; @@ -613,9 +613,10 @@ class RocmTraceCollectorImpl : public profiler::RocmTraceCollector { absl::flat_hash_map auxiliary_api_events_map_ TF_GUARDED_BY(event_maps_mutex_); - const std::vector ApiActivityInfoExchange(); + const std::vector ApiActivityInfoExchange() + TF_EXCLUSIVE_LOCKS_REQUIRED(event_maps_mutex_); - absl::flat_hash_map per_device_collector_; + absl::flat_hash_map per_device_collector_; }; //========== diff --git a/third_party/xla/xla/backends/profiler/plugin/plugin_tracer.cc b/third_party/xla/xla/backends/profiler/plugin/plugin_tracer.cc index 8176ac55f553e3..57fb7980cf06ed 100644 --- a/third_party/xla/xla/backends/profiler/plugin/plugin_tracer.cc +++ b/third_party/xla/xla/backends/profiler/plugin/plugin_tracer.cc @@ -141,23 +141,23 @@ PluginTracer::~PluginTracer() { } } -Status PluginTracer::Start() { +absl::Status PluginTracer::Start() { PLUGIN_Profiler_Start_Args args; args.profiler = profiler_; RETURN_STATUS_IF_PLUGIN_PROFILER_ERROR(profiler_api_->start(&args), profiler_api_); - return OkStatus(); + return absl::OkStatus(); } -Status PluginTracer::Stop() { +absl::Status PluginTracer::Stop() { PLUGIN_Profiler_Stop_Args args; args.profiler = profiler_; RETURN_STATUS_IF_PLUGIN_PROFILER_ERROR(profiler_api_->stop(&args), profiler_api_); - return OkStatus(); + return absl::OkStatus(); } -Status PluginTracer::CollectData(XSpace* space) { +absl::Status PluginTracer::CollectData(XSpace* space) { PLUGIN_Profiler_CollectData_Args args; args.profiler = profiler_; args.buffer = nullptr; @@ -172,7 +172,7 @@ Status PluginTracer::CollectData(XSpace* space) { plane->Swap(&tpu_plane); } } - return OkStatus(); + return absl::OkStatus(); } } // namespace profiler diff --git a/third_party/xla/xla/backends/profiler/plugin/plugin_tracer.h b/third_party/xla/xla/backends/profiler/plugin/plugin_tracer.h index acd88644ac090f..832a72e6546ea0 100644 --- a/third_party/xla/xla/backends/profiler/plugin/plugin_tracer.h +++ b/third_party/xla/xla/backends/profiler/plugin/plugin_tracer.h @@ -33,11 +33,11 @@ class PluginTracer : public tsl::profiler::ProfilerInterface { const tensorflow::ProfileOptions& options); ~PluginTracer() override; - Status Start() override; + absl::Status Start() override; - Status Stop() override; + absl::Status Stop() override; - Status CollectData(tensorflow::profiler::XSpace* space) override; + absl::Status CollectData(tensorflow::profiler::XSpace* space) override; private: const PLUGIN_Profiler_Api* profiler_api_; diff --git 
a/third_party/xla/xla/backends/profiler/tpu/tpu_tracer.cc b/third_party/xla/xla/backends/profiler/tpu/tpu_tracer.cc index 19c7e57e49a13e..7488645cf40f2a 100644 --- a/third_party/xla/xla/backends/profiler/tpu/tpu_tracer.cc +++ b/third_party/xla/xla/backends/profiler/tpu/tpu_tracer.cc @@ -95,11 +95,11 @@ class TpuTracer : public ProfilerInterface { explicit TpuTracer(); ~TpuTracer() override; - Status Start() override; + absl::Status Start() override; - Status Stop() override; + absl::Status Stop() override; - Status CollectData(XSpace* space) override; + absl::Status CollectData(XSpace* space) override; private: TpuProfiler* tpu_profiler_; @@ -118,7 +118,7 @@ TpuTracer::~TpuTracer() { stream_executor::tpu::ProfilerApiFn()->TpuProfiler_DestroyFn(tpu_profiler_); } -Status TpuTracer::Start() { +absl::Status TpuTracer::Start() { ProfilerStatusHelper status; stream_executor::tpu::ProfilerApiFn()->TpuProfiler_StartFn(tpu_profiler_, status.c_status); @@ -126,10 +126,10 @@ Status TpuTracer::Start() { LOG(ERROR) << "TPU tracer failed to start."; return status.status(); } - return OkStatus(); + return absl::OkStatus(); } -Status TpuTracer::Stop() { +absl::Status TpuTracer::Stop() { ProfilerStatusHelper status; stream_executor::tpu::ProfilerApiFn()->TpuProfiler_StopFn(tpu_profiler_, status.c_status); @@ -137,10 +137,10 @@ Status TpuTracer::Stop() { LOG(ERROR) << "TPU tracer failed to stop."; return status.status(); } - return OkStatus(); + return absl::OkStatus(); } -Status TpuTracer::CollectData(XSpace* space) { +absl::Status TpuTracer::CollectData(XSpace* space) { ProfilerStatusHelper status; // Get size of buffer required for TPU driver to serialize XSpace into. size_t size_in_bytes; @@ -164,7 +164,7 @@ Status TpuTracer::CollectData(XSpace* space) { LOG(ERROR) << "TPU tracer failed to collect data."; return status.status(); } - return OkStatus(); + return absl::OkStatus(); } // Initializes TpuProfilerApiFns. 
The initialization may not be successful if diff --git a/third_party/xla/xla/client/client.cc b/third_party/xla/xla/client/client.cc index 1d2fdab9029afa..d79d978dfeae54 100644 --- a/third_party/xla/xla/client/client.cc +++ b/third_party/xla/xla/client/client.cc @@ -49,7 +49,7 @@ absl::StatusOr Client::Transfer(const GlobalData& data, VLOG(1) << "making transfer request"; VLOG(3) << "TransferToClientRequest: {" << request.DebugString() << "}"; - Status s = stub_->TransferToClient(&request, &response); + absl::Status s = stub_->TransferToClient(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -76,7 +76,7 @@ absl::StatusOr> Client::TransferToServer( VLOG(1) << "making transfer to server request"; VLOG(3) << "TransferToServerRequest: {" << request.DebugString() << "}"; - Status s = stub_->TransferToServer(&request, &response); + absl::Status s = stub_->TransferToServer(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -93,8 +93,9 @@ absl::StatusOr> Client::TransferToServer( return std::make_unique(stub_, response.data()); } -Status Client::TransferToInfeed(const LiteralSlice& literal, int64_t replica_id, - const DeviceHandle* device_handle) { +absl::Status Client::TransferToInfeed(const LiteralSlice& literal, + int64_t replica_id, + const DeviceHandle* device_handle) { TransferToInfeedRequest request; *request.mutable_literal() = literal.ToProto(); if (device_handle) { @@ -105,7 +106,7 @@ Status Client::TransferToInfeed(const LiteralSlice& literal, int64_t replica_id, VLOG(1) << "making transfer to infeed request"; VLOG(3) << "TransferToInfeedRequest: {" << request.DebugString() << "}"; - Status s = stub_->TransferToInfeed(&request, &response); + absl::Status s = stub_->TransferToInfeed(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -130,7 +131,7 @@ absl::StatusOr Client::TransferFromOutfeed( VLOG(1) << "making transfer from outfeed request"; VLOG(3) << "TransferFromOutfeedRequest: {" << request.DebugString() << "}"; - Status s = stub_->TransferFromOutfeed(&request, &response); + absl::Status s = stub_->TransferFromOutfeed(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -147,13 +148,13 @@ absl::StatusOr Client::TransferFromOutfeed( return Literal::CreateFromProto(response.literal()); } -Status Client::ResetDevice() { +absl::Status Client::ResetDevice() { ResetDeviceRequest request; ResetDeviceResponse response; VLOG(1) << "making reset device request"; VLOG(3) << "ResetDeviceRequest: {" << request.DebugString() << "}"; - Status s = stub_->ResetDevice(&request, &response); + absl::Status s = stub_->ResetDevice(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -192,7 +193,7 @@ absl::StatusOr Client::ComputeConstant( ComputeConstantResponse response; VLOG(2) << "making compute-constant-graph request"; - Status s = stub_->ComputeConstantGraph(&request, &response); + absl::Status s = stub_->ComputeConstantGraph(&request, &response); VLOG(2) << "done with request"; if (!s.ok()) { @@ -238,7 +239,7 @@ absl::StatusOr Client::Compile( CompileResponse response; VLOG(1) << "making compile request: " << request.ShortDebugString(); - Status s = stub_->Compile(&request, &response); + absl::Status s = stub_->Compile(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -260,7 +261,7 @@ absl::StatusOr> Client::Execute( ExecuteResponse response; VLOG(1) << "making execute request: " << request.ShortDebugString(); - Status s = stub_->Execute(&request, &response); + absl::Status s 
= stub_->Execute(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -342,7 +343,7 @@ Client::ExecuteParallel(absl::Span computations) { ExecuteParallelResponse response; VLOG(1) << "making execute-graph-parallel request: " << request.ShortDebugString(); - Status s = stub_->ExecuteGraphParallel(&request, &response); + absl::Status s = stub_->ExecuteGraphParallel(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -372,7 +373,7 @@ absl::StatusOr> Client::GetDeviceHandles( GetDeviceHandlesResponse response; VLOG(1) << "making get device request: " << request.ShortDebugString(); - Status s = stub_->GetDeviceHandles(&request, &response); + absl::Status s = stub_->GetDeviceHandles(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -389,13 +390,13 @@ absl::StatusOr> Client::GetDeviceHandles( return device_handles; } -Status Client::Unregister(const GlobalData& data) { +absl::Status Client::Unregister(const GlobalData& data) { UnregisterRequest request; *request.add_data() = data.handle(); UnregisterResponse response; VLOG(1) << "making unregister request"; - Status s = stub_->Unregister(&request, &response); + absl::Status s = stub_->Unregister(&request, &response); VLOG(1) << "done with request"; return s; @@ -408,7 +409,7 @@ Client::DeconstructTuple(const GlobalData& data) { DeconstructTupleResponse response; VLOG(1) << "making DestructTuple request"; - Status s = stub_->DeconstructTuple(&request, &response); + absl::Status s = stub_->DeconstructTuple(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -433,7 +434,7 @@ absl::StatusOr Client::GetComputationStats( ComputationStatsResponse response; VLOG(1) << "making computation graph stats request"; - Status s = stub_->GetComputationGraphStats(&request, &response); + absl::Status s = stub_->GetComputationGraphStats(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -455,7 +456,7 @@ absl::StatusOr Client::GetShape(const GlobalData& data) { GetShapeResponse response; VLOG(1) << "making get shape request"; - Status s = stub_->GetShape(&request, &response); + absl::Status s = stub_->GetShape(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { @@ -493,7 +494,7 @@ absl::StatusOr Client::CreateChannelHandleByType( CreateChannelHandleResponse response; VLOG(1) << "making create channel handle request"; - Status s = stub_->CreateChannelHandle(&request, &response); + absl::Status s = stub_->CreateChannelHandle(&request, &response); VLOG(1) << "done with request"; if (!s.ok()) { diff --git a/third_party/xla/xla/client/client.h b/third_party/xla/xla/client/client.h index e4c48d58359958..9831182c9c40e0 100644 --- a/third_party/xla/xla/client/client.h +++ b/third_party/xla/xla/client/client.h @@ -151,8 +151,9 @@ class Client { // device_handle and replica_id together specify a particular device; a device // assigned for the given replica_id among the replicas that the given device // handle belongs to. - Status TransferToInfeed(const LiteralSlice& literal, int64_t replica_id = 0, - const DeviceHandle* device_handle = nullptr); + absl::Status TransferToInfeed(const LiteralSlice& literal, + int64_t replica_id = 0, + const DeviceHandle* device_handle = nullptr); // Transfers from the Outfeed of the device. // @@ -164,7 +165,7 @@ class Client { const DeviceHandle* device_handle = nullptr); // Resets the device, clearing all existing state on the device. 
- Status ResetDevice(); + absl::Status ResetDevice(); // Executes the computation with the given arguments and transfers the result // to the client as a literal. Parameters are defined the same as for @@ -195,7 +196,7 @@ class Client { const Layout* output_layout = nullptr) const; // Unregister the memory for the given GlobalData on the device. - Status Unregister(const GlobalData& data); + absl::Status Unregister(const GlobalData& data); // Returns a vector of global data handles that point to the tuple elements. absl::StatusOr>> DeconstructTuple( diff --git a/third_party/xla/xla/client/global_data.cc b/third_party/xla/xla/client/global_data.cc index 1235d1e3c50d6b..5fb1ca9df693ac 100644 --- a/third_party/xla/xla/client/global_data.cc +++ b/third_party/xla/xla/client/global_data.cc @@ -37,7 +37,7 @@ void ReleaseHandles(ServiceInterface* parent, *request.add_data() = handle; } UnregisterResponse response; - Status status = parent->Unregister(&request, &response); + absl::Status status = parent->Unregister(&request, &response); VLOG(1) << "Done with request"; if (!status.ok()) { LOG(WARNING) << "Failed to unregister handles: " << status diff --git a/third_party/xla/xla/client/lib/math.cc b/third_party/xla/xla/client/lib/math.cc index 60592de4ddc80f..772faba50d78c6 100644 --- a/third_party/xla/xla/client/lib/math.cc +++ b/third_party/xla/xla/client/lib/math.cc @@ -93,7 +93,8 @@ static XlaOp DoWithUpcastToF32(XlaOp operand, // TODO(jlebar): Use this function in more places in this file to restrict the // domain of other functions. -static Status EnsureOperandIsRealFp(absl::string_view op_name, XlaOp operand) { +static absl::Status EnsureOperandIsRealFp(absl::string_view op_name, + XlaOp operand) { auto& b = *operand.builder(); TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand)); auto elem_ty = shape.element_type(); diff --git a/third_party/xla/xla/client/lib/svd.cc b/third_party/xla/xla/client/lib/svd.cc index 9e7a33209d0b75..2304ec3b14c253 100644 --- a/third_party/xla/xla/client/lib/svd.cc +++ b/third_party/xla/xla/client/lib/svd.cc @@ -829,7 +829,7 @@ absl::StatusOr SortBySingularValuesAndPostProcessing( SVDResult SVD(XlaOp a, int64_t max_iter, float epsilon, PrecisionConfig::Precision precision) { XlaBuilder* builder = a.builder(); - auto return_error = [&](const Status& status) { + auto return_error = [&](const absl::Status& status) { SVDResult result; result.u = builder->ReportError(status); result.v = builder->ReportError(status); diff --git a/third_party/xla/xla/client/lib/tridiagonal.cc b/third_party/xla/xla/client/lib/tridiagonal.cc index 4daab7b4e8408b..459becdcfa4734 100644 --- a/third_party/xla/xla/client/lib/tridiagonal.cc +++ b/third_party/xla/xla/client/lib/tridiagonal.cc @@ -36,9 +36,9 @@ namespace tridiagonal { namespace { -Status CheckSecondToLastDimension(const Shape& op_shape, int64_t rank, - int64_t expected, - const std::string& op_name) { +absl::Status CheckSecondToLastDimension(const Shape& op_shape, int64_t rank, + int64_t expected, + const std::string& op_name) { const auto actual_num_dims = ShapeUtil::GetDimension(op_shape, rank - 2); if (actual_num_dims != expected) { @@ -119,9 +119,9 @@ struct TridiagonalMatMulShapeParams { PrimitiveType element_type; }; -Status ValidateTridiagonalMatMulDiagonal(const Shape& diagonal_shape, - const std::string_view diagonal_name, - const Shape& rhs_shape) { +absl::Status ValidateTridiagonalMatMulDiagonal( + const Shape& diagonal_shape, const std::string_view diagonal_name, + const Shape& rhs_shape) { const int64_t diagonal_rank 
= diagonal_shape.rank(); const int64_t rhs_rank = rhs_shape.rank(); if (diagonal_rank != rhs_rank) { diff --git a/third_party/xla/xla/client/local_client.cc b/third_party/xla/xla/client/local_client.cc index 59d4ecf6a6e82d..f8a29b126dacd3 100644 --- a/third_party/xla/xla/client/local_client.cc +++ b/third_party/xla/xla/client/local_client.cc @@ -51,7 +51,7 @@ LocalExecutable::LocalExecutable(std::unique_ptr executable, << "Must have a valid device ordinal that the executable was built for."; } -Status LocalExecutable::ValidateExecutionOptions( +absl::Status LocalExecutable::ValidateExecutionOptions( const ExecutableRunOptions& run_options, const Backend& backend) { if (run_options.stream() != nullptr) { if (!run_options.stream()->ok()) { @@ -207,7 +207,7 @@ static std::shared_ptr DumpArguments( for (const ShapedBuffer* arg : arguments) { auto literal = std::make_shared(arg->on_host_shape()); backend->transfer_manager()->TransferLiteralFromDevice( - stream, *arg, literal.get(), [snapshot, literal](Status status) { + stream, *arg, literal.get(), [snapshot, literal](absl::Status status) { if (!status.ok()) { LOG(ERROR) << "TransferLiteralFromDevice for HLO snapshot inputs " "failed: " @@ -227,7 +227,7 @@ static void DumpOutputsAndSaveSnapshot(const Backend* backend, auto literal = std::make_shared(outputs.on_host_shape()); backend->transfer_manager()->TransferLiteralFromDevice( stream, outputs, literal.get(), - [snapshot{std::move(snapshot)}, literal](Status status) { + [snapshot{std::move(snapshot)}, literal](absl::Status status) { if (status.ok()) { *snapshot->mutable_result() = literal->ToProto(); } else { @@ -471,16 +471,16 @@ absl::StatusOr LocalClient::GlobalDataToShapedBuffer( return local_service_->GlobalDataToShapedBuffer(data, replica_number); } -Status LocalClient::TransferToInfeedLocal(const LiteralSlice& literal, - int device_ordinal) { +absl::Status LocalClient::TransferToInfeedLocal(const LiteralSlice& literal, + int device_ordinal) { TF_ASSIGN_OR_RETURN(se::StreamExecutor * executor, backend().stream_executor(device_ordinal)); return backend().transfer_manager()->TransferLiteralToInfeed(executor, literal); } -Status LocalClient::TransferFromOutfeedLocal(int device_ordinal, - MutableBorrowingLiteral literal) { +absl::Status LocalClient::TransferFromOutfeedLocal( + int device_ordinal, MutableBorrowingLiteral literal) { TF_ASSIGN_OR_RETURN(se::StreamExecutor * executor, backend().stream_executor(device_ordinal)); return backend().transfer_manager()->TransferLiteralFromOutfeed(executor, diff --git a/third_party/xla/xla/client/local_client.h b/third_party/xla/xla/client/local_client.h index d6e382192cab86..a73210b0af7f63 100644 --- a/third_party/xla/xla/client/local_client.h +++ b/third_party/xla/xla/client/local_client.h @@ -85,8 +85,8 @@ class LocalExecutable { // // The given ExecutableRunOptions override any values from TF_XLA_FLAGS // environment variable. - Status ValidateExecutionOptions(const ExecutableRunOptions& run_options, - const Backend& backend); + absl::Status ValidateExecutionOptions(const ExecutableRunOptions& run_options, + const Backend& backend); // Returns a literal containing the contents of the given ShapedBuffer. 
absl::StatusOr LiteralFromShapedBuffer( @@ -102,7 +102,7 @@ class LocalExecutable { int build_device_ordinal() const { return build_options_.device_ordinal(); } template - StatusOr AsyncCallAndBlockHostUntilDone( + absl::StatusOr AsyncCallAndBlockHostUntilDone( absl::Span argument_shapes, const ExecutableRunOptions& run_options, std::function(const ExecutableRunOptions&)> async_callback) { @@ -110,8 +110,8 @@ class LocalExecutable { RunHelper(argument_shapes, run_options)); ExecutableRunOptions options = options_and_stream.first.run_options(); options.set_device_ordinal(-1); - StatusOr result = async_callback(options); - Status block_status = options.stream()->BlockHostUntilDone(); + absl::StatusOr result = async_callback(options); + absl::Status block_status = options.stream()->BlockHostUntilDone(); TF_RETURN_IF_ERROR(result.status()); TF_RETURN_IF_ERROR(block_status); return result; @@ -188,15 +188,16 @@ class LocalClient : public Client { // TODO(b/69670845): Remove the 'Local' from the name when LocalClient does // not inherit from Client and there is no possibility of confusion with // Client::TransferToInfeed. - Status TransferToInfeedLocal(const LiteralSlice& literal, int device_ordinal); + absl::Status TransferToInfeedLocal(const LiteralSlice& literal, + int device_ordinal); // Transfer and return a value from the outfeed of the given device. The // shape of the object to transfer is determined by `literal`'s shape. // TODO(b/69670845): Remove the 'Local' from the name when LocalClient does // not inherit from Client and there is no possibility of confusion with // Client::TransferFromOutfeed. - Status TransferFromOutfeedLocal(int device_ordinal, - MutableBorrowingLiteral literal); + absl::Status TransferFromOutfeedLocal(int device_ordinal, + MutableBorrowingLiteral literal); // Returns the device ordinal that corresponds to the given replica number. // diff --git a/third_party/xla/xla/client/padding.cc b/third_party/xla/xla/client/padding.cc index f36c1cd18a505b..22c4901a512362 100644 --- a/third_party/xla/xla/client/padding.cc +++ b/third_party/xla/xla/client/padding.cc @@ -25,9 +25,9 @@ limitations under the License. namespace xla { -Status ValidatePaddingValues(absl::Span input_dimensions, - absl::Span window_dimensions, - absl::Span window_strides) { +absl::Status ValidatePaddingValues(absl::Span input_dimensions, + absl::Span window_dimensions, + absl::Span window_strides) { bool ok = input_dimensions.size() == window_dimensions.size() && input_dimensions.size() == window_strides.size(); if (!ok) { diff --git a/third_party/xla/xla/client/padding.h b/third_party/xla/xla/client/padding.h index 5c4a34b0d09220..50ed6e58057ad8 100644 --- a/third_party/xla/xla/client/padding.h +++ b/third_party/xla/xla/client/padding.h @@ -41,9 +41,9 @@ enum class Padding { // Validates that the slices are acceptable for determining padding -- this can // be used to check the preconditions of MakePadding below to produce an error // message that can be returned to the user. -Status ValidatePaddingValues(absl::Span input_dimensions, - absl::Span window_dimensions, - absl::Span window_strides); +absl::Status ValidatePaddingValues(absl::Span input_dimensions, + absl::Span window_dimensions, + absl::Span window_strides); // Returns the padding needed for the base area, given the base area dimensions, // window dimensions, strides, and the type of padding. 
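LocalExecutable::AsyncCallAndBlockHostUntilDone above now traffics in absl::StatusOr/absl::Status but keeps its ordering: launch the asynchronous callback, block the host on the stream, and only then surface an error, with the callback's status checked before the stream's. A simplified sketch of that ordering; ToyStream and the lambdas are illustrative assumptions, not the XLA stream API.

    #include <functional>
    #include <iostream>
    #include <string>

    #include "absl/status/status.h"
    #include "absl/status/statusor.h"

    // Stand-in for se::Stream: the only thing we need is BlockHostUntilDone().
    struct ToyStream {
      absl::Status BlockHostUntilDone() { return absl::OkStatus(); }
    };

    // Run the async callback, then block on the stream, and only then decide
    // which error (if any) to report; the callback's error takes precedence.
    template <typename T>
    absl::StatusOr<T> AsyncCallAndBlockHostUntilDone(
        ToyStream& stream,
        std::function<absl::StatusOr<T>(ToyStream&)> async_callback) {
      absl::StatusOr<T> result = async_callback(stream);
      absl::Status block_status = stream.BlockHostUntilDone();
      if (!result.ok()) return result.status();
      if (!block_status.ok()) return block_status;
      return result;
    }

    int main() {
      ToyStream stream;
      absl::StatusOr<std::string> ok =
          AsyncCallAndBlockHostUntilDone<std::string>(stream, [](ToyStream&) {
            return absl::StatusOr<std::string>("done");
          });
      std::cout << (ok.ok() ? *ok : std::string(ok.status().message())) << "\n";

      absl::StatusOr<std::string> err =
          AsyncCallAndBlockHostUntilDone<std::string>(
              stream, [](ToyStream&) -> absl::StatusOr<std::string> {
                return absl::InternalError("async launch failed");
              });
      std::cout << err.status() << "\n";
    }

Blocking on the stream before checking the callback's result means the host always waits for outstanding work, even when the launch itself already failed.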
diff --git a/third_party/xla/xla/client/xla_builder.cc b/third_party/xla/xla/client/xla_builder.cc index ecc6b86dfcb575..e6b4e955aa69dd 100644 --- a/third_party/xla/xla/client/xla_builder.cc +++ b/third_party/xla/xla/client/xla_builder.cc @@ -15,6 +15,7 @@ limitations under the License. #include "xla/client/xla_builder.h" +#include #include #include #include @@ -111,8 +112,8 @@ bool InstrIsSetBound(const HloInstructionProto* instr_proto) { return false; } -Status NormalizeAndAssignSharing(HloInstructionProto* instr, - const OpSharding& op_sharding) { +absl::Status NormalizeAndAssignSharing(HloInstructionProto* instr, + const OpSharding& op_sharding) { // Normalize tuple sharding and fail the call if the sharding is invalid. Shape shape(instr->shape()); TF_ASSIGN_OR_RETURN(HloSharding sharding, @@ -531,7 +532,7 @@ XlaBuilder::XlaBuilder(const std::string& computation_name) XlaBuilder::~XlaBuilder() = default; -XlaOp XlaBuilder::ReportError(const Status& error) { +XlaOp XlaBuilder::ReportError(const absl::Status& error) { CHECK(!error.ok()); if (die_immediately_on_error_) { LOG(FATAL) << "error building computation: " << error; @@ -686,16 +687,16 @@ void XlaBuilder::IsConstantVisitor(const int64_t op_handle, int depth, visited->insert(op_handle); } -Status XlaBuilder::SetInstructionFrontendAttribute(const XlaOp op, - std::string attribute, - std::string value) { +absl::Status XlaBuilder::SetInstructionFrontendAttribute(const XlaOp op, + std::string attribute, + std::string value) { TF_ASSIGN_OR_RETURN(auto instr_proto, LookUpMutableInstruction(op)); auto* frontend_attributes = instr_proto->mutable_frontend_attributes(); (*frontend_attributes->mutable_map())[attribute] = std::move(value); return OkStatus(); } -Status XlaBuilder::SetInstructionSharding( +absl::Status XlaBuilder::SetInstructionSharding( XlaOp op, const std::optional& sharding) { TF_ASSIGN_OR_RETURN(auto instr_proto, LookUpMutableInstruction(op)); if (!sharding.has_value()) { @@ -716,7 +717,7 @@ XlaComputation XlaBuilder::BuildAndNoteError() { return std::move(build_status).value(); } -Status XlaBuilder::GetCurrentStatus() const { +absl::Status XlaBuilder::GetCurrentStatus() const { if (!first_error_.ok()) { std::string backtrace; first_error_backtrace_.Dump(tsl::DebugWriteToString, &backtrace); @@ -803,7 +804,7 @@ absl::StatusOr XlaBuilder::Build( return std::move(computation); } -/* static */ Status XlaBuilder::PopulateInputOutputAliasAndBufferDonor( +/* static */ absl::Status XlaBuilder::PopulateInputOutputAliasAndBufferDonor( HloModuleProto* module, const ProgramShape& program_shape, const std::vector& input_output_aliases, const absl::flat_hash_set& @@ -1986,7 +1987,7 @@ XlaOp XlaBuilder::SparseDot( }); } -Status XlaBuilder::VerifyConvolution( +absl::Status XlaBuilder::VerifyConvolution( const Shape& lhs_shape, const Shape& rhs_shape, const ConvolutionDimensionNumbers& dimension_numbers) const { if (lhs_shape.rank() != rhs_shape.rank()) { @@ -3332,7 +3333,7 @@ XlaOp XlaBuilder::ConditionalImpl( }); } -Status XlaBuilder::CheckOpBuilder(XlaOp op) const { +absl::Status XlaBuilder::CheckOpBuilder(XlaOp op) const { if (this != op.builder()) { return InvalidArgument( "XlaOp with handle %d is built by builder '%s', but is trying to use " @@ -3789,15 +3790,55 @@ XlaOp XlaBuilder::AllToAllArray( return all_to_all; } DimensionVector sizes; + const bool is_unbounded = operand_shape->is_unbounded_dynamic(); + std::vector dynamic_sizes; + auto GetR1DimensionSizeOrConstant = [&](XlaOp operand, + int64_t dimension) -> XlaOp { + if 
(operand_shape->is_unbounded_dynamic_dimension(dimension)) { + return Reshape(GetDimensionSize(operand, dimension), {1}); + } + return ConstantR1( + this, {static_cast(operand_shape->dimensions(dimension))}); + }; + XlaOp r1_split_count = + ConstantR1(this, {static_cast(split_count)}); for (int64_t i = 0; i < operand_shape->rank(); ++i) { if (i != split_dimension) { sizes.push_back(operand_shape->dimensions(i)); + if (is_unbounded) { + dynamic_sizes.push_back(GetR1DimensionSizeOrConstant(operand, i)); + } continue; } sizes.push_back(split_count); - sizes.push_back(operand_shape->dimensions(i) / split_count); + sizes.push_back(operand_shape->is_unbounded_dynamic_dimension(i) + ? Shape::kUnboundedSize + : operand_shape->dimensions(i) / split_count); + + if (is_unbounded) { + dynamic_sizes.push_back(r1_split_count); + dynamic_sizes.push_back( + operand_shape->is_unbounded_dynamic_dimension(i) + ? Div(GetR1DimensionSizeOrConstant(operand, i), r1_split_count) + : ConstantR1(this, + {static_cast(sizes.back())})); + } + } + + if (is_unbounded) { + std::vector dynamic_dimensions; + std::transform( + sizes.begin(), sizes.end(), std::back_inserter(dynamic_dimensions), + [](int64_t size) { return size == Shape::kUnboundedSize; }); + TF_ASSIGN_OR_RETURN( + const Shape shape, + ShapeUtil::MakeValidatedShape(all_to_all_shape.element_type(), sizes, + dynamic_dimensions)); + all_to_all = + MhloDynamicReshape(all_to_all, ConcatInDim(dynamic_sizes, 0), shape); + } else { + all_to_all = Reshape(all_to_all, sizes); } - all_to_all = Reshape(all_to_all, sizes); std::vector permutation; const auto rank = operand_shape->rank(); @@ -3810,6 +3851,21 @@ XlaOp XlaBuilder::AllToAllArray( permutation.push_back(dim_after_reshape); } all_to_all = Transpose(all_to_all, permutation); + + if (is_unbounded) { + std::vector new_dimensions; + for (int64_t i = 0; i < operand_shape->rank(); ++i) { + new_dimensions.push_back(GetR1DimensionSizeOrConstant(operand, i)); + } + new_dimensions[split_dimension] = + Div(new_dimensions[split_dimension], r1_split_count); + new_dimensions[concat_dimension] = + Mul(new_dimensions[concat_dimension], r1_split_count); + + return MhloDynamicReshape(all_to_all, ConcatInDim(new_dimensions, 0), + all_to_all_shape); + } + return Reshape(all_to_all_shape, all_to_all); }); } @@ -3865,6 +3921,13 @@ XlaOp XlaBuilder::AllToAllTuple( const std::optional& channel_id) { return ReportErrorOrReturn([&]() -> absl::StatusOr { TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand)); + if (operand_shape->is_unbounded_dynamic() || + split_dimension == Shape::kUnboundedSize || + concat_dimension == Shape::kUnboundedSize || + split_count == Shape::kUnboundedSize) { + return InvalidArgument( + "AllToAllTuple does not support unbounded dynamic shapes"); + } // The HloInstruction for AllToAll currently only handles the data // communication: it accepts N already split parts and scatters them to N @@ -3890,14 +3953,14 @@ XlaOp XlaBuilder::AllToAllTuple( } // Handle data communication. - XlaOp alltoall = + XlaOp all_to_all = this->AllToAllTuple(slices, replica_groups, layout, channel_id); // Concat the N received parts. 
std::vector received; received.reserve(split_count); for (int i = 0; i < split_count; i++) { - received.push_back(this->GetTupleElement(alltoall, i)); + received.push_back(this->GetTupleElement(all_to_all, i)); } return this->ConcatInDim(received, concat_dimension); }); @@ -4542,7 +4605,7 @@ XlaBuilder::CreateDefaultConvDimensionNumbers(int num_spatial_dims) { return dimension_numbers; } -/* static */ Status XlaBuilder::Validate( +/* static */ absl::Status XlaBuilder::Validate( const ConvolutionDimensionNumbers& dnum) { if (dnum.input_spatial_dimensions_size() < 2) { return FailedPrecondition("input spacial dimension < 2: %d", diff --git a/third_party/xla/xla/client/xla_builder.h b/third_party/xla/xla/client/xla_builder.h index 571904f7cd7995..d88c90b2cf8896 100644 --- a/third_party/xla/xla/client/xla_builder.h +++ b/third_party/xla/xla/client/xla_builder.h @@ -336,7 +336,7 @@ class XlaBuilder { int num_spatial_dims = 2); // Returns an error if the convolution dimension numbers have conflicts. - static Status Validate(const ConvolutionDimensionNumbers& dnum); + static absl::Status Validate(const ConvolutionDimensionNumbers& dnum); // Returns a new XlaBuilder whose resultant Computation is used only by this // XlaBuilder. The sub-XlaBuilder has the same die_immediately_on_error @@ -385,11 +385,11 @@ class XlaBuilder { // building the computation when they make a final call to Build(). // // See also set_die_immediately_on_error(). - Status first_error() const { return first_error_; } + absl::Status first_error() const { return first_error_; } // Returns the current status of the builder, complete with the stack trace // information. - Status GetCurrentStatus() const; + absl::Status GetCurrentStatus() const; // Returns the shape of the given op. absl::StatusOr GetShape(XlaOp op) const; @@ -416,15 +416,15 @@ class XlaBuilder { // * dying if die_immediately_on_error_ is true. // Returns an XlaOp with an invalid handle but a valid builder. This value can // be returned in place of a value in APIs that return an XlaOp. - XlaOp ReportError(const Status& error); + XlaOp ReportError(const absl::Status& error); - // A helper function that converts a StatusOr into an XlaOp. - // If the Status was an error, reports the error to builder and returns an - // invalid XlaOp handle. + // A helper function that converts a absl::StatusOr into an XlaOp. + // If the absl::Status was an error, reports the error to builder and returns + // an invalid XlaOp handle. XlaOp ReportErrorOrReturn(const absl::StatusOr& op); - // A helper function that runs a function that returns a StatusOr and - // returns an XlaOp. + // A helper function that runs a function that returns a absl::StatusOr + // and returns an XlaOp. XlaOp ReportErrorOrReturn( absl::FunctionRef()> op_creator); @@ -475,15 +475,15 @@ class XlaBuilder { // "value". If the attribute already existed, then its value is updated. // // The attribute is only added to the HloInstruction, not to the builder. - Status SetInstructionFrontendAttribute(XlaOp op, std::string attribute, - std::string value); + absl::Status SetInstructionFrontendAttribute(XlaOp op, std::string attribute, + std::string value); // Looks up the HloInstruction and sets the sharding. If the sharding already // existed, then its value is updated. // // The sharding is only added to the HloInstruction, not to the builder. 
- Status SetInstructionSharding(XlaOp op, - const std::optional& sharding); + absl::Status SetInstructionSharding( + XlaOp op, const std::optional& sharding); // Returns shapes for the operands. absl::StatusOr> GetOperandShapes( @@ -1117,7 +1117,7 @@ class XlaBuilder { bool* is_constant) const; // Checks bounds for convolution parameters. - Status VerifyConvolution( + absl::Status VerifyConvolution( const Shape& lhs_shape, const Shape& rhs_shape, const ConvolutionDimensionNumbers& dimension_numbers) const; @@ -1125,7 +1125,7 @@ class XlaBuilder { // Populates the module with the input/output alias information stored within // the input_output_aliases vector. - static Status PopulateInputOutputAliasAndBufferDonor( + static absl::Status PopulateInputOutputAliasAndBufferDonor( HloModuleProto* module, const ProgramShape& program_shape, const std::vector& input_output_aliases, const absl::flat_hash_set& @@ -1139,7 +1139,7 @@ class XlaBuilder { // The first error encountered while building the computation. // This is OK until the first error is encountered. - Status first_error_; + absl::Status first_error_; // The saved stack trace from the point at which the first error occurred. tsl::SavedStackTrace first_error_backtrace_; @@ -1661,7 +1661,7 @@ class XlaBuilder { protected: // Returns OK status if the given op was built using this builder. Otherwise, // returns an error. - Status CheckOpBuilder(XlaOp op) const; + absl::Status CheckOpBuilder(XlaOp op) const; private: XlaOp AllGatherImpl(XlaOp operand, int64_t all_gather_dimension, @@ -1703,7 +1703,7 @@ class XlaBuilder { // Here, InstructionType is either const HloInstructionProto* or non-const // HloInstructionProto*. template - StatusOr LookUpInstructionByHandleInternal( + absl::StatusOr LookUpInstructionByHandleInternal( int64_t handle) const { auto it = handle_to_index_.find(handle); if (it == handle_to_index_.end()) { @@ -1723,11 +1723,11 @@ class XlaBuilder { // Here, InstructionType is either const HloInstructionProto* or non-const // HloInstructionProto*. // - // TODO(hinsu): Return const pointer within StatusOr and use + // TODO(hinsu): Return const pointer within absl::StatusOr and use // absl::implicit_cast at callsites. This requires implicit_cast support in // absl::StatusOr similar to absl::StatusOr. template - StatusOr LookUpInstructionInternal(XlaOp op) const { + absl::StatusOr LookUpInstructionInternal(XlaOp op) const { TF_RETURN_IF_ERROR(CheckOpBuilder(op)); return LookUpInstructionByHandleInternal(op.handle()); } @@ -2564,7 +2564,10 @@ XlaOp ReduceScatter( const std::optional& layout = std::nullopt, std::optional use_global_device_ids = std::nullopt); -// Enqueues an operation that do an Alltoall of the operand cross cores. +// Enqueues an operation that does an AllToAll of the operand across cores. +// This involves AllToAll, followed by Reshape, Transpose, and another Reshape +// to get proper codegen. See implementation for additional details. +// // An optional `layout` can be specified to force the layout of the instruction. // This is used to guarantee the same layout for a group of AllToAll ops // compiled separately.
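The new comment above says the array AllToAll is lowered as AllToAll followed by Reshape, Transpose, and another Reshape. Below is a hedged sketch (not part of the patch) of that shape bookkeeping for a concrete static case, written against the public XlaBuilder API; the f32[6, 4] operand, the split/concat choices, and the helper name are illustrative assumptions, and the cross-replica exchange itself is elided.

#include <cstdint>

#include "xla/client/xla_builder.h"
#include "xla/shape_util.h"

// For an f32[6, 4] operand with split_dimension=0, concat_dimension=1 and
// split_count=3, AllToAll produces f32[6/3, 4*3] = f32[2, 12]. The lowering
// factors the split dimension, moves the split_count factor next to the
// concat dimension, and merges it in.
xla::XlaComputation AllToAllDecompositionSketch() {
  xla::XlaBuilder b("all_to_all_decomposition_sketch");
  xla::XlaOp operand = xla::Parameter(
      &b, 0, xla::ShapeUtil::MakeShape(xla::F32, {6, 4}), "operand");
  // The real lowering first emits the AllToAll HLO on f32[6, 4]; the
  // cross-replica exchange itself is elided in this sketch.
  xla::XlaOp exchanged = operand;
  // f32[6, 4] -> f32[3, 2, 4]: split dimension 0 into {split_count, 6 / 3}.
  xla::XlaOp reshaped = xla::Reshape(exchanged, {3, 2, 4});
  // f32[3, 2, 4] -> f32[2, 3, 4]: move the split_count factor next to the
  // concat dimension.
  xla::XlaOp transposed = xla::Transpose(reshaped, {1, 0, 2});
  // f32[2, 3, 4] -> f32[2, 12]: merge the split_count factor into the concat
  // dimension, which is the final AllToAll result shape.
  xla::XlaOp result = xla::Reshape(transposed, {2, 12});
  (void)result;
  return b.Build().value();
}

The unbounded-dynamic path added in xla_builder.cc follows the same steps but computes the intermediate sizes at run time with MhloDynamicReshape, as the tests further down exercise.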
diff --git a/third_party/xla/xla/client/xla_builder_test.cc b/third_party/xla/xla/client/xla_builder_test.cc index 3aa8c1ca00c5aa..fad65f35384248 100644 --- a/third_party/xla/xla/client/xla_builder_test.cc +++ b/third_party/xla/xla/client/xla_builder_test.cc @@ -754,7 +754,7 @@ TEST(XlaBuilderTest, BuildWithSpecificRootWithWrongBuilder) { Parameter(&b, 0, shape, "param"); const XlaOp other_param = Parameter(&other_b, 0, shape, "other_param"); - Status status = b.Build(other_param).status(); + absl::Status status = b.Build(other_param).status(); ASSERT_IS_NOT_OK(status); EXPECT_THAT( status.message(), @@ -1202,7 +1202,7 @@ TEST(XlaBuilderTest, DynamicSelectNotCompatible) { auto gte0 = GetTupleElement(p0, 0); // f32[4,<=5,6] auto gte1 = GetTupleElement(p0, 1); // f32[4,5,<=6] Select(pred, gte0, gte1); - Status status = BuildHloModule(b).status(); + absl::Status status = BuildHloModule(b).status(); ASSERT_IS_OK(status); } @@ -1240,6 +1240,54 @@ TEST(XlaBuilderTest, DotWithPreferredElementType) { ShapeUtil::Equal(ShapeUtil::MakeShape(U32, {2, 2}), result_shape)); } +TEST(XlaBuilderTest, FftWithFFT) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[5, <=10]")); + const std::vector fft_length = {5, 10}; + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[5, <=10]")); + Fft(Parameter(&b, 0, operand, "operand"), /*fft_type=*/FftType::FFT, + fft_length); + TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + +TEST(XlaBuilderTest, FftWithIFFT) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[5, <=10]")); + const std::vector fft_length = {5, 10}; + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[5, <=10]")); + Fft(Parameter(&b, 0, operand, "operand"), /*fft_type=*/FftType::IFFT, + fft_length); + TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + +TEST(XlaBuilderTest, FftWithRFFT) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f64[10, <=5]")); + const std::vector fft_length = {5}; + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c128[10, <=3]")); + Fft(Parameter(&b, 0, operand, "operand"), /*fft_type=*/FftType::RFFT, + fft_length); + TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + +TEST(XlaBuilderTest, FftWithIRFFT) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c128[10, <=3]")); + const std::vector fft_length = {5}; + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f64[10, <=5]")); + Fft(Parameter(&b, 0, operand, "operand"), /*fft_type=*/FftType::IRFFT, + fft_length); + TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + TEST(XlaBuilderTest, SparseDot) { XlaBuilder b(TestName()); auto lhs = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {10, 16}), "lhs"); @@ -1299,7 +1347,7 @@ TEST(XlaBuilderTest, ConvolutionWithPreferredElementType) { TEST(XlaBuilderTest, AfterAllWithNonTokenOperands) { XlaBuilder b(TestName()); AfterAll(&b, {CreateToken(&b), ConstantR0(&b, 1.0)}); - Status status = b.Build().status(); + absl::Status status = b.Build().status(); ASSERT_IS_NOT_OK(status); 
EXPECT_THAT(status.message(), ::testing::HasSubstr("All operands to AfterAll must be tokens")); @@ -1999,6 +2047,126 @@ TEST(XlaBuilderTest, UnboundedAllReduce) { GmockMatch(m::Op().WithShapeEqualTo(&expected))); } +TEST(XlaBuilderTest, UnboundedAllToAllDynamicSplitDimension) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]")); + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 45]")); + AllToAll(/*operand=*/Parameter(&b, 0, operand, "operand"), + /*split_dimension=*/0, + /*concat_dimension=*/1, + /*split_count=*/3, + /*replica_groups=*/{}); + TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr module, + BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + +TEST(XlaBuilderTest, UnboundedAllToAllDynamicConcatDimension) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]")); + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 5]")); + AllToAll(/*operand=*/Parameter(&b, 0, operand, "operand"), + /*split_dimension=*/1, + /*concat_dimension=*/0, + /*split_count=*/3, + /*replica_groups=*/{}); + TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr module, + BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + +TEST(XlaBuilderTest, UnboundedAllToAllDynamicSplitAndConcatDimensionEqual) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]")); + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 15]")); + AllToAll(/*operand=*/Parameter(&b, 0, operand, "operand"), + /*split_dimension=*/0, + /*concat_dimension=*/0, + /*split_count=*/3, + /*replica_groups=*/{}); + TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr module, + BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + +TEST(XlaBuilderTest, UnboundedAllToAllFullyDynamic) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, ?]")); + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, ?]")); + AllToAll(/*operand=*/Parameter(&b, 0, operand, "operand"), + /*split_dimension=*/0, + /*concat_dimension=*/1, + /*split_count=*/3, + /*replica_groups=*/{}); + TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr module, + BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + +TEST(XlaBuilderTest, UnboundedAllToAllTupleVariadicUnsupported) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]{1,0}")); + b.ReportErrorOrReturn( + AllToAllTuple(/*operands=*/{Parameter(&b, 0, operand, "operand0"), + Parameter(&b, 1, operand, "operand1")}, + /*replica_groups=*/{})); + EXPECT_THAT( + BuildHloModule(b), + StatusIs(_, + HasSubstr( + "AllToAllTuple does not support unbounded dynamic shapes"))); +} + +TEST(XlaBuilderTest, UnboundedAllToAllTupleUnsupported) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]{1,0}")); + b.ReportErrorOrReturn( + AllToAllTuple(/*operand=*/Parameter(&b, 0, operand, "operand"), + /*split_dimension=*/0, + /*concat_dimension=*/1, + /*split_count=*/3, + /*replica_groups=*/{})); + EXPECT_THAT( + BuildHloModule(b), + StatusIs(_, + HasSubstr( + "AllToAllTuple does not support unbounded dynamic shapes"))); +} + +TEST(XlaBuilderTest, BoundedAllToAllTupleUnsupported) { + XlaBuilder b(TestName()); + 
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, <=15]{1,0}")); + b.ReportErrorOrReturn( + AllToAllTuple(/*operand=*/Parameter(&b, 0, operand, "operand"), + /*split_dimension=*/0, + /*concat_dimension=*/1, + /*split_count=*/3, + /*replica_groups=*/{})); + EXPECT_THAT( + BuildHloModule(b), + StatusIs(_, + HasSubstr("AllToAll does not support bounded dynamic shapes"))); +} + +TEST(XlaBuilderTest, BoundedAllToAllUnsupported) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, <=15]{1,0}")); + b.ReportErrorOrReturn( + AllToAllTuple(/*operand=*/Parameter(&b, 0, operand, "operand"), + /*split_dimension=*/0, + /*concat_dimension=*/1, + /*split_count=*/3, + /*replica_groups=*/{})); + EXPECT_THAT( + BuildHloModule(b), + StatusIs(_, + HasSubstr("AllToAll does not support bounded dynamic shapes"))); +} + TEST(XlaBuilderTest, UnboundedAnd) { XlaBuilder b(TestName()); TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, @@ -2393,6 +2561,54 @@ TEST(XlaBuilderTest, UnboundedDynamicUpdateSlice) { GmockMatch(m::Op().WithShapeEqualTo(&expected))); } +TEST(XlaBuilderTest, UnboundedFftWithFFT) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[2, <=5, ?]")); + const std::vector fft_length = {5, 10}; + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[2, <=5, ?]")); + Fft(Parameter(&b, 0, operand, "operand"), /*fft_type=*/FftType::FFT, + fft_length); + TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + +TEST(XlaBuilderTest, UnboundedFftWithIFFT) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[2, <=5, ?]")); + const std::vector fft_length = {5, 10}; + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[2, <=5, ?]")); + Fft(Parameter(&b, 0, operand, "operand"), /*fft_type=*/FftType::IFFT, + fft_length); + TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + +TEST(XlaBuilderTest, UnboundedFftWithRFFT) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f64[2, <=5, ?]")); + const std::vector fft_length = {5, 10}; + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c128[2, <=5, 6]")); + Fft(Parameter(&b, 0, operand, "operand"), /*fft_type=*/FftType::RFFT, + fft_length); + TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + +TEST(XlaBuilderTest, UnboundedFftWithIRFFT) { + XlaBuilder b(TestName()); + TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c128[2, <=5, ?]")); + const std::vector fft_length = {5, 10}; + TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f64[2, <=5, 10]")); + Fft(Parameter(&b, 0, operand, "operand"), /*fft_type=*/FftType::IRFFT, + fft_length); + TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b)); + EXPECT_THAT(GetRoot(*module), + GmockMatch(m::Op().WithShapeEqualTo(&expected))); +} + TEST(XlaBuilderTest, UnboundedGather) { XlaBuilder b(TestName()); TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, 4, 2]")); diff --git a/third_party/xla/xla/debug_options_flags.cc b/third_party/xla/xla/debug_options_flags.cc index 3b31ed9bc3f0b7..ba4561802cf8fb 100644 --- a/third_party/xla/xla/debug_options_flags.cc +++ b/third_party/xla/xla/debug_options_flags.cc @@ -78,8 +78,6 @@ 
DebugOptions DefaultDebugOptionsIgnoringFlags() { #ifdef XLA_CPU_USE_ACL opts.set_xla_cpu_use_acl(true); #endif - opts.set_xla_cpu_use_xla_runtime(false); - opts.set_xla_cpu_sparse_cuda_threads(0); opts.set_xla_cpu_enable_fast_math(false); // Disable forms of fast math that have caused users problems in the past. @@ -111,10 +109,8 @@ DebugOptions DefaultDebugOptionsIgnoringFlags() { opts.add_xla_gpu_enable_command_buffer(DebugOptions::CUBLAS); opts.add_xla_gpu_enable_command_buffer(DebugOptions::CUSTOM_CALL); opts.add_xla_gpu_enable_command_buffer(DebugOptions::CUDNN); - opts.set_xla_gpu_graph_num_runs_to_instantiate(-1); opts.set_xla_gpu_graph_min_graph_size(5); opts.set_xla_gpu_graph_enable_concurrent_region(false); - opts.set_xla_gpu_graph_eviction_timeout_seconds(60); // Despite the name, fast min/max on GPUs does not seem to be any faster, and // adds very counter-intuitive "NaN-swallowing" behavior. @@ -138,7 +134,6 @@ DebugOptions DefaultDebugOptionsIgnoringFlags() { opts.set_xla_detailed_logging(true); opts.set_xla_enable_dumping(true); - opts.set_xla_gpu_enable_xla_runtime_executable(false); opts.set_xla_gpu_enable_custom_fusions(false); opts.set_xla_gpu_enable_address_computation_fusion(true); opts.set_xla_gpu_nccl_termination_timeout_seconds(-1); @@ -152,7 +147,6 @@ DebugOptions DefaultDebugOptionsIgnoringFlags() { opts.set_xla_gpu_redzone_padding_bytes(8 * 1024 * 1024); opts.set_xla_gpu_shape_checks(DebugOptions::RUNTIME); opts.set_xla_gpu_normalize_layouts(true); - opts.set_xla_gpu_simplify_all_fp_conversions(true); opts.set_xla_dump_latency_hiding_schedule(false); opts.set_xla_gpu_enable_latency_hiding_scheduler(false); opts.set_xla_gpu_lhs_enable_gpu_async_tracker(true); @@ -211,6 +205,8 @@ DebugOptions DefaultDebugOptionsIgnoringFlags() { opts.set_xla_gpu_cublas_fallback(true); opts.set_xla_gpu_cudnn_gemm_fusion_level(0); opts.set_xla_gpu_enable_while_loop_double_buffering(false); + opts.set_xla_gpu_enable_while_loop_unrolling( + DebugOptions::WHILE_LOOP_UNROLLING_NO_UNROLL); opts.set_xla_gpu_ensure_minor_dot_contraction_dims(false); opts.set_xla_gpu_filter_kernels_spilling_registers_on_autotuning(true); opts.set_xla_gpu_llvm_verification_level(0); @@ -253,6 +249,10 @@ DebugOptions DefaultDebugOptionsIgnoringFlags() { opts.set_xla_reduce_window_rewrite_base_length(32); + opts.set_xla_gpu_require_complete_aot_autotune_results(false); + + opts.set_xla_gpu_enable_host_memory_offloading(false); + return opts; } @@ -764,17 +764,6 @@ void MakeDebugOptionsFlags(std::vector* flag_list, "xla_cpu_use_acl", bool_setter_for(&DebugOptions::set_xla_cpu_use_acl), debug_options->xla_cpu_use_acl(), "Generate calls to ACL (Arm Compute Library) in the CPU backend.")); - flag_list->push_back( - tsl::Flag("xla_cpu_use_xla_runtime", - bool_setter_for(&DebugOptions::set_xla_cpu_use_xla_runtime), - debug_options->xla_cpu_use_xla_runtime(), - "Enable XLA Runtime in the CPU backend.")); - flag_list->push_back(tsl::Flag( - "xla_cpu_sparse_cuda_threads", - int32_setter_for(&DebugOptions::set_xla_cpu_sparse_cuda_threads), - debug_options->xla_cpu_sparse_cuda_threads(), - "Sets number fo CUDA threads for sparse GPU acceleration in the CPU " - "backend (0 = off).")); flag_list->push_back(tsl::Flag( "xla_gpu_crash_on_verification_failures", bool_setter_for( @@ -1023,6 +1012,11 @@ void MakeDebugOptionsFlags(std::vector* flag_list, bool_setter_for(&DebugOptions::set_xla_gpu_deterministic_ops), debug_options->xla_gpu_deterministic_ops(), "Guarantees run-to-run determinism on GPU.")); + 
flag_list->push_back(tsl::Flag( + "xla_gpu_exclude_nondeterministic_ops", + bool_setter_for(&DebugOptions::set_xla_gpu_exclude_nondeterministic_ops), + debug_options->xla_gpu_exclude_nondeterministic_ops(), + "Excludes non-deterministic ops from compiled executables.")); flag_list->push_back(tsl::Flag( "xla_gpu_disable_async_collectives", setter_for_xla_gpu_disable_async_collectives, @@ -1146,13 +1140,6 @@ void MakeDebugOptionsFlags(std::vector* flag_list, " can either be a list of command types or a list of command types with" " + and - as prefix, which indicate adding or removing a command type" " to/from the default list.")); - flag_list->push_back(tsl::Flag( - "xla_gpu_graph_num_runs_to_instantiate", - int32_setter_for( - &DebugOptions::set_xla_gpu_graph_num_runs_to_instantiate), - debug_options->xla_gpu_graph_num_runs_to_instantiate(), - "Instantiate a gpu graph after the time a captured function is executed " - "reaches the threshold.")); flag_list->push_back(tsl::Flag( "xla_gpu_graph_min_graph_size", int32_setter_for(&DebugOptions::set_xla_gpu_graph_min_graph_size), @@ -1166,14 +1153,6 @@ void MakeDebugOptionsFlags(std::vector* flag_list, debug_options->xla_gpu_graph_enable_concurrent_region(), "Identify concurrent regions in gpu graphs and execute them " "concurrently.")); - flag_list->push_back(tsl::Flag( - "xla_gpu_graph_eviction_timeout_seconds", - int32_setter_for( - &DebugOptions::set_xla_gpu_graph_eviction_timeout_seconds), - debug_options->xla_gpu_graph_eviction_timeout_seconds(), - "Timeout in seconds to evict instantiated Gpu graphs from device. When " - "XLA instantiates new Gpu graphs, it evicts graphs that were not " - "recently executed to free space on device.")); flag_list->push_back( tsl::Flag("xla_dump_disable_metadata", @@ -1195,11 +1174,6 @@ void MakeDebugOptionsFlags(std::vector* flag_list, "MLIR will be in the llvm-parsable format and can be processed by " "mlir-opt tools. " "Pretty print form is not legal MLIR.")); - flag_list->push_back(tsl::Flag( - "xla_gpu_enable_xla_runtime_executable", - bool_setter_for(&DebugOptions::set_xla_gpu_enable_xla_runtime_executable), - debug_options->xla_gpu_enable_xla_runtime_executable(), - "Whether to enable XLA runtime for XLA:GPU backend")); flag_list->push_back(tsl::Flag( "xla_gpu_enable_custom_fusions", bool_setter_for(&DebugOptions::set_xla_gpu_enable_custom_fusions), @@ -1263,11 +1237,6 @@ void MakeDebugOptionsFlags(std::vector* flag_list, "Amount of padding the redzone allocator will put on one side of each " "buffer it allocates. 
(So the buffer's total size will be increased by " "2x this value.)")); - flag_list->push_back(tsl::Flag( - "xla_gpu_simplify_all_fp_conversions", - bool_setter_for(&DebugOptions::set_xla_gpu_simplify_all_fp_conversions), - debug_options->xla_gpu_simplify_all_fp_conversions(), - "Allows any chain of floating-point conversions to be simplified.")); flag_list->push_back(tsl::Flag( "xla_gpu_shape_checks", setter_for_xla_gpu_shape_checks, DebugOptions::ShapeChecks_Name(debug_options->xla_gpu_shape_checks()), @@ -1467,7 +1436,7 @@ void MakeDebugOptionsFlags(std::vector* flag_list, "xla_gpu_require_complete_aot_autotune_results", bool_setter_for( &DebugOptions::set_xla_gpu_require_complete_aot_autotune_results), - debug_options->xla_gpu_multi_streamed_windowed_einsum(), + debug_options->xla_gpu_require_complete_aot_autotune_results(), "Whether to require complete AOT autotuning results.")); flag_list->push_back(tsl::Flag( "xla_gpu_auto_spmd_partitioning_memory_budget_gb", @@ -1506,6 +1475,14 @@ void MakeDebugOptionsFlags(std::vector* flag_list, "Dumps autotuned GEMM fusions to the directory specified by " "xla_dump_to or stdout. Each fusion is dumped only once, as an optimized " "HLO.")); + flag_list->push_back(tsl::Flag( + "xla_gpu_override_gemm_autotuner", + string_setter_for(&DebugOptions::set_xla_gpu_override_gemm_autotuner), + debug_options->xla_gpu_override_gemm_autotuner(), + "Overrides the GEMM autotuner to use the specified " + "(AutotuneResult::TritonGemmKey) textproto configuration for all Triton " + "GEMM fusions. (You can get such textprotos from the debug logs of the " + "GEMM autotuner.) ")); flag_list->push_back(tsl::Flag( "xla_gpu_copy_insertion_use_region_analysis", bool_setter_for( @@ -1700,6 +1677,11 @@ void MakeDebugOptionsFlags(std::vector* flag_list, &DebugOptions::set_xla_reduce_window_rewrite_base_length), debug_options->xla_reduce_window_rewrite_base_length(), "Base length to rewrite the reduce window to, no rewrite if set to 0.")); + flag_list->push_back(tsl::Flag( + "xla_gpu_enable_host_memory_offloading", + bool_setter_for(&DebugOptions::set_xla_gpu_enable_host_memory_offloading), + debug_options->xla_gpu_enable_host_memory_offloading(), + "Whether to trigger host memory offloading on a device.")); } // NOLINT(readability/fn_size) // Allocates flag_values and flag_objects; this function must not be called more diff --git a/third_party/xla/xla/examples/axpy/stablehlo_compile_test.cc b/third_party/xla/xla/examples/axpy/stablehlo_compile_test.cc index f8c54b33cb4d2f..7072f49a6544ca 100644 --- a/third_party/xla/xla/examples/axpy/stablehlo_compile_test.cc +++ b/third_party/xla/xla/examples/axpy/stablehlo_compile_test.cc @@ -72,13 +72,12 @@ TEST(StableHloAxpyTest, LoadAndRunCpuExecutable) { // The PjRtStreamExecutorClient will allow us to compile and execute // computations on the device we just configured. - auto pjrt_se_client = PjRtStreamExecutorClient( - "cpu", local_client, std::move(devices), - std::vector>(), - /*process_index=*/0, /*allocator=*/nullptr, - /*host_memory_allocator=*/nullptr, - /*should_stage_host_to_device_transfers=*/false, - /*gpu_run_options=*/nullptr); + auto pjrt_se_client = + PjRtStreamExecutorClient("cpu", local_client, std::move(devices), + /*process_index=*/0, /*allocator=*/nullptr, + /*host_memory_allocator=*/nullptr, + /*should_stage_host_to_device_transfers=*/false, + /*gpu_run_options=*/nullptr); // Read StableHLO program to string. 
std::string program_path = tsl::io::JoinPath( diff --git a/third_party/xla/xla/executable_run_options.h b/third_party/xla/xla/executable_run_options.h index f02e5e73d68c3a..c6a4897c2067ec 100644 --- a/third_party/xla/xla/executable_run_options.h +++ b/third_party/xla/xla/executable_run_options.h @@ -103,8 +103,8 @@ using ThenExecuteFunction = // recorded on a `stream` once the send operation is completed and data was // copied from the `src` memory. `frontend_attrs` contains frontend specific // attributes for the send. -using SendDeviceMemoryFunction = - std::function>( +using SendDeviceMemoryFunction = std::function< + absl::StatusOr>>( int64_t channel_id, stream_executor::Stream* stream, const Shape& shape, const stream_executor::DeviceMemoryBase& src, const absl::flat_hash_map& frontend_attrs)>; @@ -113,8 +113,8 @@ using SendDeviceMemoryFunction = // recorded on a `stream` once the recv operation is completed and data was // copied into the `dst` memory. `frontend_attrs` contains frontend specific // attributes for the receive. -using RecvDeviceMemoryFunction = - std::function>( +using RecvDeviceMemoryFunction = std::function< + absl::StatusOr>>( int64_t channel_id, stream_executor::Stream* stream, const Shape& shape, stream_executor::DeviceMemoryBase* dst, const absl::flat_hash_map& frontend_attrs)>; diff --git a/third_party/xla/xla/ffi/BUILD b/third_party/xla/xla/ffi/BUILD index dbc3785e8d5e56..3fe3e55c5d8e97 100644 --- a/third_party/xla/xla/ffi/BUILD +++ b/third_party/xla/xla/ffi/BUILD @@ -24,6 +24,7 @@ cc_library( "//xla/ffi/api:c_api_internal", "//xla/stream_executor:device_memory", "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/base:dynamic_annotations", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/log:check", "@com_google_absl//absl/types:span", @@ -35,10 +36,15 @@ cc_library( srcs = ["execution_context.cc"], hdrs = ["execution_context.h"], deps = [ + "@com_google_absl//absl/base:core_headers", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/status", "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/strings", + "@com_google_absl//absl/synchronization", + "@local_tsl//tsl/lib/gtl:int_type", + "@local_tsl//tsl/platform:logging", + "@local_tsl//tsl/platform:statusor", ], ) @@ -97,6 +103,22 @@ cc_library( ], ) +cc_library( + name = "attribute_map", + srcs = ["attribute_map.cc"], + hdrs = ["attribute_map.h"], + deps = [ + ":call_frame", + "@com_google_absl//absl/status", + "@com_google_absl//absl/status:statusor", + "@com_google_absl//absl/strings", + "@llvm-project//llvm:Support", + "@llvm-project//mlir:IR", + "@llvm-project//mlir:Support", + "@local_tsl//tsl/platform:errors", + ], +) + xla_cc_test( name = "ffi_test", srcs = ["ffi_test.cc"], diff --git a/third_party/xla/xla/ffi/api/api.h b/third_party/xla/xla/ffi/api/api.h index 75b678d9490c10..d0299022a630b2 100644 --- a/third_party/xla/xla/ffi/api/api.h +++ b/third_party/xla/xla/ffi/api/api.h @@ -150,10 +150,8 @@ XLA_FFI_Error* Ffi::RegisterStaticHandler(const XLA_FFI_Api* api, XLA_FFI_Handler_Register_Args args; args.struct_size = XLA_FFI_Handler_Register_Args_STRUCT_SIZE; args.priv = nullptr; - args.name = XLA_FFI_ByteSpan{XLA_FFI_ByteSpan_STRUCT_SIZE, nullptr, - name.data(), name.size()}; - args.platform = XLA_FFI_ByteSpan{XLA_FFI_ByteSpan_STRUCT_SIZE, nullptr, - platform.data(), platform.size()}; + args.name = XLA_FFI_ByteSpan{name.data(), name.size()}; + args.platform = XLA_FFI_ByteSpan{platform.data(), platform.size()}; 
args.handler = handler; args.traits = traits; return api->XLA_FFI_Handler_Register(&args); @@ -822,18 +820,32 @@ class Unexpected; template class Expected { public: - Expected(T value) : data_(std::move(value)) {} // NOLINT - Expected(Unexpected u); // NOLINT + constexpr Expected(T value) : data_(std::move(value)) {} // NOLINT + constexpr Expected(Unexpected u); // NOLINT - operator bool() const { // NOLINT + constexpr operator bool() const { // NOLINT return has_value(); } - T operator*() const { return value(); } - T* operator->() const { return &value(); } - bool has_value() const { return std::holds_alternative(data_); } - T value() const { return std::get(data_); } - E error() const { return std::get(data_); } + constexpr T& operator*() & { return value(); } + constexpr const T& operator*() const& { return value(); } + constexpr T&& operator*() && { return std::move(value()); } + constexpr const T& operator*() const&& { return std::move(value()); } + + constexpr T* operator->() { return &value(); } + constexpr const T* operator->() const { return &value(); } + + constexpr bool has_value() const { return std::holds_alternative(data_); } + + constexpr T& value() & { return std::get(data_); } + constexpr const T& value() const& { return std::get(data_); } + constexpr T&& value() && { return std::get(std::move(data_)); } + constexpr const T& value() const&& { return std::get(std::move(data_)); } + + constexpr E& error() & { return std::get(data_); } + constexpr const E& error() const& { return std::get(data_); } + constexpr E&& error() && { return std::get(std::move(data_)); } + constexpr const E&& error() const&& { return std::get(std::move(data_)); } private: std::variant data_; @@ -842,7 +854,7 @@ class Expected { template class Unexpected { public: - explicit Unexpected(E error) : error_(std::move(error)) {} + explicit constexpr Unexpected(E error) : error_(std::move(error)) {} private: template @@ -854,7 +866,8 @@ class Unexpected { Unexpected(const char*) -> Unexpected; template -Expected::Expected(Unexpected u) : data_(std::move(u.error_)) {} +constexpr Expected::Expected(Unexpected u) + : data_(std::move(u.error_)) {} //===----------------------------------------------------------------------===// // Type-safe wrapper for accessing a variable number of arguments. diff --git a/third_party/xla/xla/ffi/api/c_api.h b/third_party/xla/xla/ffi/api/c_api.h index 39d4d8feef825e..243e5d3e2bea51 100644 --- a/third_party/xla/xla/ffi/api/c_api.h +++ b/third_party/xla/xla/ffi/api/c_api.h @@ -217,44 +217,38 @@ typedef enum { typedef struct XLA_FFI_ExecutionContext XLA_FFI_ExecutionContext; //===----------------------------------------------------------------------===// -// Call frame +// Primitives. //===----------------------------------------------------------------------===// +// TypeId uniquely identifies a user-defined type in a given XLA FFI instance. +struct XLA_FFI_TypeId { + int64_t type_id; +}; + // We use byte spans to pass strings to handlers because strings might not be // null terminated, and even if they are, looking for a null terminator can // become very expensive in tight loops. struct XLA_FFI_ByteSpan { - size_t struct_size; - void* priv; - const char* ptr; size_t len; }; -XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_ByteSpan, len); - // A struct to pass a scalar value to FFI handler. 
struct XLA_FFI_Scalar { - size_t struct_size; - void* priv; - XLA_FFI_DataType dtype; void* value; }; -XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Scalar, value); - // A struct to pass a dense array to FFI handler. struct XLA_FFI_Array { - size_t struct_size; - void* priv; - XLA_FFI_DataType dtype; size_t size; void* data; }; -XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Array, data); +//===----------------------------------------------------------------------===// +// Call frame +//===----------------------------------------------------------------------===// struct XLA_FFI_Args { size_t struct_size; @@ -336,6 +330,24 @@ XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Handler_Register_Args, traits); typedef XLA_FFI_Error* XLA_FFI_Handler_Register( XLA_FFI_Handler_Register_Args* args); +//===----------------------------------------------------------------------===// +// TypeId +//===----------------------------------------------------------------------===// + +struct XLA_FFI_TypeId_Register_Args { + size_t struct_size; + void* priv; + + XLA_FFI_ByteSpan name; + XLA_FFI_TypeId* type_id; // out +}; + +XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_TypeId_Register_Args, type_id); + +// Registers user type `name` and returns a unique `type_id`. +typedef XLA_FFI_Error* XLA_FFI_TypeId_Register( + XLA_FFI_TypeId_Register_Args* args); + //===----------------------------------------------------------------------===// // ExecutionContext //===----------------------------------------------------------------------===// @@ -345,7 +357,7 @@ struct XLA_FFI_ExecutionContext_Get_Args { void* priv; XLA_FFI_ExecutionContext* ctx; - XLA_FFI_ByteSpan id; + XLA_FFI_TypeId* type_id; void* data; // out }; @@ -390,6 +402,7 @@ struct XLA_FFI_Api { _XLA_FFI_API_STRUCT_FIELD(XLA_FFI_Error_Destroy); _XLA_FFI_API_STRUCT_FIELD(XLA_FFI_Handler_Register); _XLA_FFI_API_STRUCT_FIELD(XLA_FFI_Stream_Get); + _XLA_FFI_API_STRUCT_FIELD(XLA_FFI_TypeId_Register); _XLA_FFI_API_STRUCT_FIELD(XLA_FFI_ExecutionContext_Get); }; diff --git a/third_party/xla/xla/ffi/api/ffi.h b/third_party/xla/xla/ffi/api/ffi.h index addb7af543c8cf..3d56c00b94bda2 100644 --- a/third_party/xla/xla/ffi/api/ffi.h +++ b/third_party/xla/xla/ffi/api/ffi.h @@ -16,12 +16,12 @@ limitations under the License. #ifndef XLA_FFI_API_FFI_H_ #define XLA_FFI_API_FFI_H_ -#include #ifdef XLA_FFI_FFI_H_ #error Two different XLA FFI implementations cannot be included together #endif // XLA_FFI_FFI_H_ #include +#include #include #include #include @@ -29,6 +29,7 @@ limitations under the License. #include #include #include +#include #include #include #include @@ -567,27 +568,51 @@ struct CtxDecoding> { // UserData //===----------------------------------------------------------------------===// +// All user data types that are passed via the execution context must be +// registered with the XLA FFI ahead of time to get unique type id. 
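A minimal sketch (not part of the patch) of the registration flow the comment above describes, modeled on the updated ffi_test.cc later in this diff: a user-defined type exposes a static TypeId, registers it once with XLA_FFI_REGISTER_TYPE, and a handler then receives it through UserData<T>. The MyState type, the "my_state" name, and MyHandlerImpl are illustrative; GetXlaFfiApi() is the in-process API accessor that the test itself uses.

#include <string>

#include "xla/ffi/api/ffi.h"  // external FFI API: Ffi, Error, UserData, TypeId
#include "xla/ffi/ffi_api.h"  // in-process GetXlaFfiApi(), as used by ffi_test.cc

namespace example {
using ::xla::ffi::Error;
using ::xla::ffi::Ffi;
using ::xla::ffi::TypeId;
using ::xla::ffi::UserData;

// User-defined context type: must expose a static `TypeId id` field.
struct MyState {
  static TypeId id;
  std::string str;
};

TypeId MyState::id = {};  // zero-initialized; filled in by registration below
XLA_FFI_REGISTER_TYPE(GetXlaFfiApi(), "my_state", &MyState::id);

// UserData<MyState> decodes to a MyState* looked up by MyState::id in the
// execution context associated with the current call.
static Error MyHandlerImpl(MyState* state) {
  state->str = "touched by handler";
  return Error::Success();
}

static auto kMyHandler =
    Ffi::Bind().Ctx<UserData<MyState>>().To(MyHandlerImpl);
}  // namespace example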
+using TypeId = XLA_FFI_TypeId; // NOLINT + +inline XLA_FFI_Error* RegisterType(const XLA_FFI_Api* api, + std::string_view name, + XLA_FFI_TypeId* type_id) { + XLA_FFI_TypeId_Register_Args args; + args.struct_size = XLA_FFI_TypeId_Register_Args_STRUCT_SIZE; + args.priv = nullptr; + args.name = XLA_FFI_ByteSpan{name.data(), name.size()}; + args.type_id = type_id; + return api->XLA_FFI_TypeId_Register(&args); +} + +#define XLA_FFI_REGISTER_TYPE(API, NAME, TYPE_ID) \ + XLA_FFI_REGISTER_TYPE_(API, NAME, TYPE_ID, __COUNTER__) +#define XLA_FFI_REGISTER_TYPE_(API, NAME, TYPE_ID, N) \ + XLA_FFI_ATTRIBUTE_UNUSED static const XLA_FFI_Error* \ + xla_ffi_type_##N##_registered_ = \ + [] { return ::xla::ffi::RegisterType(API, NAME, TYPE_ID); }() + // A type tag for automatic decoding user data passed via the execution context. -template +template struct UserData {}; -template -struct CtxDecoding> { +template +struct CtxDecoding> { using Type = T*; + static_assert(std::is_same_v, + "UserData type must have a static `TypeId id` field"); + static std::optional Decode(const XLA_FFI_Api* api, XLA_FFI_ExecutionContext* ctx, DiagnosticEngine& diagnostic) { - static constexpr std::string_view id_view = {id}; - XLA_FFI_ExecutionContext_Get_Args args; args.struct_size = XLA_FFI_ExecutionContext_Get_Args_STRUCT_SIZE; args.priv = nullptr; args.ctx = ctx; - args.id = XLA_FFI_ByteSpan{XLA_FFI_ByteSpan_STRUCT_SIZE, nullptr, - id_view.data(), id_view.size()}; + args.type_id = &T::id; args.data = nullptr; + assert(args.type_id->type_id > 0 && "type must be registered with XLA FFI"); + if (XLA_FFI_Error* err = api->XLA_FFI_ExecutionContext_Get(&args); err) { diagnostic.Emit("Failed to get user data from execution context: ") << internal::ErrorUtil::GetErrorMessage(api, err); diff --git a/third_party/xla/xla/ffi/api/ffi_test.cc b/third_party/xla/xla/ffi/api/ffi_test.cc index c47e16b22e7de3..b963b99c5689fd 100644 --- a/third_party/xla/xla/ffi/api/ffi_test.cc +++ b/third_party/xla/xla/ffi/api/ffi_test.cc @@ -352,17 +352,19 @@ TEST(FfiTest, PointerAttr) { } struct MyData { + static TypeId id; std::string str; }; -TEST(FfiTest, UserData) { - static constexpr char kId[] = "my_data"; +TypeId MyData::id = {}; // zero-initialize type id +XLA_FFI_REGISTER_TYPE(GetXlaFfiApi(), "my_data", &MyData::id); +TEST(FfiTest, UserData) { MyData data{"foo"}; - auto deleter = +[](void*) {}; ExecutionContext execution_context; - TF_ASSERT_OK(execution_context.Emplace(kId, &data, deleter)); + TF_ASSERT_OK(execution_context.Insert( + ExecutionContext::TypeId(MyData::id.type_id), &data)); CallFrameBuilder builder; auto call_frame = builder.Build(); @@ -372,7 +374,7 @@ TEST(FfiTest, UserData) { return Error::Success(); }; - auto handler = Ffi::Bind().Ctx>().To(fn); + auto handler = Ffi::Bind().Ctx>().To(fn); ServiceExecutableRunOptions service_run_options; service_run_options.mutable_run_options()->set_ffi_execution_context( diff --git a/third_party/xla/xla/ffi/attribute_map.cc b/third_party/xla/xla/ffi/attribute_map.cc new file mode 100644 index 00000000000000..8ef42c30085b19 --- /dev/null +++ b/third_party/xla/xla/ffi/attribute_map.cc @@ -0,0 +1,129 @@ +/* Copyright 2024 The OpenXLA Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "xla/ffi/attribute_map.h" + +#include +#include + +#include "absl/status/status.h" +#include "absl/status/statusor.h" +#include "absl/strings/str_cat.h" +#include "llvm/ADT/TypeSwitch.h" +#include "mlir/IR/Attributes.h" // from @llvm-project +#include "mlir/IR/BuiltinAttributes.h" // from @llvm-project +#include "mlir/Support/LLVM.h" // from @llvm-project +#include "xla/ffi/call_frame.h" +#include "tsl/platform/errors.h" + +using FlatAttribute = xla::ffi::CallFrameBuilder::FlatAttribute; +using FlatAttributesMap = xla::ffi::CallFrameBuilder::FlatAttributesMap; + +namespace xla::ffi { + +absl::StatusOr BuildAttributesMap( + mlir::DictionaryAttr dict) { + FlatAttributesMap attributes; + for (auto& kv : dict) { + std::string_view name = kv.getName().strref(); + + auto boolean = [&](mlir::BoolAttr boolean) { + attributes[name] = static_cast(boolean.getValue()); + return absl::OkStatus(); + }; + + auto integer = [&](mlir::IntegerAttr integer) { + switch (integer.getType().getIntOrFloatBitWidth()) { + case 1: + attributes[name] = static_cast(integer.getInt()); + return absl::OkStatus(); + case 8: + attributes[name] = static_cast(integer.getInt()); + return absl::OkStatus(); + case 16: + attributes[name] = static_cast(integer.getInt()); + return absl::OkStatus(); + case 32: + attributes[name] = static_cast(integer.getInt()); + return absl::OkStatus(); + case 64: + attributes[name] = static_cast(integer.getInt()); + return absl::OkStatus(); + default: + return absl::InvalidArgumentError(absl::StrCat( + "Unsupported integer attribute bit width for attribute: ", name)); + } + }; + + auto fp = [&](mlir::FloatAttr fp) { + switch (fp.getType().getIntOrFloatBitWidth()) { + case 32: + attributes[name] = static_cast(fp.getValue().convertToFloat()); + return absl::OkStatus(); + case 64: + attributes[name] = + static_cast(fp.getValue().convertToDouble()); + return absl::OkStatus(); + default: + return absl::InvalidArgumentError(absl::StrCat( + "Unsupported float attribute bit width for attribute: ", name)); + } + }; + + auto arr = [&](mlir::DenseArrayAttr arr) { + if (auto dense = mlir::dyn_cast(arr)) { + attributes[name] = dense.asArrayRef().vec(); + return absl::OkStatus(); + } else if (auto dense = mlir::dyn_cast(arr)) { + attributes[name] = dense.asArrayRef().vec(); + return absl::OkStatus(); + } else if (auto dense = mlir::dyn_cast(arr)) { + attributes[name] = dense.asArrayRef().vec(); + return absl::OkStatus(); + } else if (auto dense = mlir::dyn_cast(arr)) { + attributes[name] = dense.asArrayRef().vec(); + return absl::OkStatus(); + } else if (auto dense = mlir::dyn_cast(arr)) { + attributes[name] = dense.asArrayRef().vec(); + return absl::OkStatus(); + } else if (auto dense = mlir::dyn_cast(arr)) { + attributes[name] = dense.asArrayRef().vec(); + return absl::OkStatus(); + } else { + return absl::InvalidArgumentError(absl::StrCat( + "Unsupported array element type for attribute: ", name)); + } + }; + + auto str = [&](mlir::StringAttr str) { + attributes[name] = str.getValue().str(); + return absl::OkStatus(); + }; 
+ + TF_RETURN_IF_ERROR( + llvm::TypeSwitch(kv.getValue()) + .Case(boolean) + .Case(integer) + .Case(fp) + .Case(arr) + .Case(str) + .Default([&](mlir::Attribute) { + return absl::InvalidArgumentError(absl::StrCat( + "Unsupported attribute type for attribute: ", name)); + })); + } + return attributes; +} +} // namespace xla::ffi diff --git a/third_party/xla/xla/ffi/attribute_map.h b/third_party/xla/xla/ffi/attribute_map.h new file mode 100644 index 00000000000000..d6c37b31c5522b --- /dev/null +++ b/third_party/xla/xla/ffi/attribute_map.h @@ -0,0 +1,32 @@ +/* Copyright 2024 The OpenXLA Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef XLA_FFI_ATTRIBUTE_MAP_H_ +#define XLA_FFI_ATTRIBUTE_MAP_H_ + +#include "absl/status/statusor.h" +#include "mlir/IR/BuiltinAttributes.h" // from @llvm-project +#include "xla/ffi/call_frame.h" + +namespace xla::ffi { + +// Converts MLIR dictionary attribute attached to a custom call operation to a +// custom call handler attributes that are forwarded to the FFI handler. +absl::StatusOr BuildAttributesMap( + mlir::DictionaryAttr dict); + +} // namespace xla::ffi + +#endif // XLA_FFI_ATTRIBUTE_MAP_H_ diff --git a/third_party/xla/xla/ffi/call_frame.cc b/third_party/xla/xla/ffi/call_frame.cc index 867c7ccb35edf2..cb5ae84822bf4f 100644 --- a/third_party/xla/xla/ffi/call_frame.cc +++ b/third_party/xla/xla/ffi/call_frame.cc @@ -26,6 +26,7 @@ limitations under the License. 
#include #include "absl/algorithm/container.h" +#include "absl/base/dynamic_annotations.h" #include "absl/log/check.h" #include "absl/types/span.h" #include "xla/ffi/api/c_api.h" @@ -83,12 +84,16 @@ void CallFrameBuilder::AddBufferArg(se::DeviceMemoryBase memory, PrimitiveType type, absl::Span dims) { args_.push_back(Buffer{memory, type, {dims.begin(), dims.end()}}); + ABSL_ANNOTATE_MEMORY_IS_INITIALIZED( + args_.back().dims.data(), sizeof(int64_t) * args_.back().dims.size()); } void CallFrameBuilder::AddBufferRet(se::DeviceMemoryBase memory, PrimitiveType type, absl::Span dims) { rets_.push_back(Buffer{memory, type, {dims.begin(), dims.end()}}); + ABSL_ANNOTATE_MEMORY_IS_INITIALIZED( + rets_.back().dims.data(), sizeof(int64_t) * rets_.back().dims.size()); } void CallFrameBuilder::AddAttributes(AttributesMap attrs) { @@ -135,19 +140,19 @@ struct CallFrame::Dictionary { struct CallFrame::Array { CallFrameBuilder::Array value; // XLA_FFI_Array::data - XLA_FFI_Array array = {XLA_FFI_Array_STRUCT_SIZE, nullptr}; + XLA_FFI_Array array = {}; }; struct CallFrame::Scalar { CallFrameBuilder::Scalar value; // XLA_FFI_Scalar::value - XLA_FFI_Scalar scalar = {XLA_FFI_Scalar_STRUCT_SIZE, nullptr}; + XLA_FFI_Scalar scalar = {}; }; struct CallFrame::String { std::string value; // XLA_FFI_ByteSpan::ptr - XLA_FFI_ByteSpan span = {XLA_FFI_ByteSpan_STRUCT_SIZE, nullptr}; + XLA_FFI_ByteSpan span = {}; }; struct CallFrame::NamedAttribute { diff --git a/third_party/xla/xla/ffi/execution_context.cc b/third_party/xla/xla/ffi/execution_context.cc index 53aaeb7943a41c..2ea5f8bf37f736 100644 --- a/third_party/xla/xla/ffi/execution_context.cc +++ b/third_party/xla/xla/ffi/execution_context.cc @@ -15,73 +15,87 @@ limitations under the License. #include "xla/ffi/execution_context.h" +#include #include #include #include #include #include +#include "absl/base/attributes.h" +#include "absl/base/const_init.h" +#include "absl/container/flat_hash_map.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" +#include "absl/synchronization/mutex.h" namespace xla::ffi { -ExecutionContext::OpaqueUserData::OpaqueUserData( - void* data, OpaqueUserData::Deleter deleter) +ABSL_CONST_INIT absl::Mutex type_registry_mutex(absl::kConstInit); + +using TypeRegistry = absl::flat_hash_map; +static TypeRegistry& StaticTypeRegistry() { + static auto* registry = new TypeRegistry(); + return *registry; +} + +ExecutionContext::TypeId ExecutionContext::GetNextTypeId() { + static auto* counter = new std::atomic(1); + return TypeId(counter->fetch_add(1)); +} + +ExecutionContext::UserData::UserData(void* data, Deleter deleter) : data_(data), deleter_(std::move(deleter)) {} -ExecutionContext::OpaqueUserData::~OpaqueUserData() { +ExecutionContext::UserData::~UserData() { if (deleter_) deleter_(data_); } -absl::Status ExecutionContext::Emplace(std::string id, void* data, - OpaqueUserData::Deleter deleter) { - if (!data) return absl::InvalidArgumentError("User data must be not null"); +absl::StatusOr +ExecutionContext::RegisterExternalTypeId(std::string_view name) { + absl::MutexLock lock(&type_registry_mutex); + auto& registry = StaticTypeRegistry(); - auto emplaced = opaque_.emplace( - id, std::make_shared(data, std::move(deleter))); + // Try to emplace with type id zero and fill it with real type id only if we + // successfully acquired an entry for a given name. 
+ auto emplaced = registry.emplace(name, TypeId(0)); if (!emplaced.second) { return absl::AlreadyExistsError( - absl::StrCat("Opaque user data with id ", id, - " already exists in execution context")); + absl::StrCat("Type id ", emplaced.first->second.value(), + " already registered for type name ", name)); } - - return absl::OkStatus(); + return emplaced.first->second = GetNextTypeId(); } -absl::StatusOr> -ExecutionContext::Lookup(std::string_view id) const { - auto it = opaque_.find(id); - if (it == opaque_.end()) { - return absl::NotFoundError(absl::StrCat("Opaque user data with id ", id, - " not found in execution context")); - } - return it->second; +absl::Status ExecutionContext::Insert(TypeId type_id, void* data, + Deleter deleter) { + return InsertUserData(type_id, + std::make_unique(data, std::move(deleter))); } -absl::Status ExecutionContext::Insert(int64_t type_id, - std::shared_ptr data) { +absl::Status ExecutionContext::InsertUserData(TypeId type_id, + std::unique_ptr data) { if (!data) return absl::InvalidArgumentError("User data must be not null"); - auto emplaced = typed_.emplace(type_id, std::move(data)); + auto emplaced = user_data_.emplace(type_id, std::move(data)); if (!emplaced.second) { return absl::AlreadyExistsError( - absl::StrCat("User data with type id ", type_id, + absl::StrCat("User data with type id ", type_id.value(), " already exists in execution context")); } - return absl::OkStatus(); } -absl::StatusOr> -ExecutionContext::Lookup(int64_t type_id) const { - auto it = typed_.find(type_id); - if (it == typed_.end()) { - return absl::NotFoundError(absl::StrCat("User data with type id ", type_id, +absl::StatusOr ExecutionContext::LookupUserData( + TypeId type_id) const { + auto it = user_data_.find(type_id); + if (it == user_data_.end()) { + return absl::NotFoundError(absl::StrCat("User data with type id ", + type_id.value(), " not found in execution context")); } - return it->second; + return it->second.get(); } } // namespace xla::ffi diff --git a/third_party/xla/xla/ffi/execution_context.h b/third_party/xla/xla/ffi/execution_context.h index 21d3c85e1facb4..1a5250913830c4 100644 --- a/third_party/xla/xla/ffi/execution_context.h +++ b/third_party/xla/xla/ffi/execution_context.h @@ -16,110 +16,128 @@ limitations under the License. #ifndef XLA_FFI_EXECUTION_CONTEXT_H_ #define XLA_FFI_EXECUTION_CONTEXT_H_ +#include #include #include #include -#include #include -#include #include #include "absl/container/flat_hash_map.h" #include "absl/status/status.h" #include "absl/status/statusor.h" +#include "tsl/lib/gtl/int_type.h" +#include "tsl/platform/logging.h" +#include "tsl/platform/statusor.h" namespace xla::ffi { // Execution context is a container for forwarding arbitrary user data to FFI -// handlers in the scope of a single execution. Execution context allows to pass -// arbitrary user data to FFI handlers via the side channel that does not -// require modifying HLO modules. There are two kinds of user data that can be -// passed to FFI handlers: +// handlers in the scope of a single XLA execution. Execution context allows to +// pass arbitrary user data to FFI handlers via the side channel that does not +// require modifying HLO modules. // -// 1. Opaque data. This is a wrapper for an opaque user data pointer that is -// useful when FFI handler is registered in the dynamically loaded library -// and we do not know the type of the data and can only work with the opaque -// pointer. 
+// From the XLA FFI perspective, user data is an opaque pointer that can be +// forwarded to the FFI handler. We rely on a type id to guarantee that we +// forward user data of the correct type. There are two kinds of type ids: // -// 2. Typed data. This is useful when the FFI handler is registered in the same -// process and we can rely on global static variable to assign ids to types -// and we don't need to worry about breaking C++ ABI. +// 1. External type id. When FFI handlers are defined in a dynamically loaded +// library, they must register types used in the execution context ahead +// of time and explicitly get a unique type id for them. // -// For internal FFI handlers we always use typed data, and use opaque data only -// if FFI handler has to be defined in a separate dynamically loaded library. +// 2. Internal type id. When an FFI handler is defined in the same binary, we +// rely on a global static registry to automatically assign type ids. // // Examples: FFI handler can register a per-execution cache in the execution // context and get access to it in the FFI handler, with a guarantee that it is // unique between separate calls to XLA execute. class ExecutionContext { public: - // A base class for typed user data used for FFI handlers registered in the - // same process where we can safely pass around C++ objects. - class UserData { - public: - virtual ~UserData() = default; - }; + template + using Deleter = std::function; + + TSL_LIB_GTL_DEFINE_INT_TYPE(TypeId, int64_t); + + // Registers external type with a given name in a static type registry. + static absl::StatusOr RegisterExternalTypeId(std::string_view name); + + // Inserts opaque user data with a given type id and optional deleter. + absl::Status Insert(TypeId type_id, void* data, + Deleter deleter = nullptr); + // Inserts typed user data of type `T` and optional deleter. template - using IsUserData = std::enable_if_t>; + absl::Status Insert(T* data, Deleter deleter = nullptr); - // An RAII wrapper for opaque user data that is useful when FFI handler is - // registered in the dynamically loaded library and we do not know the type of - // the data and can only work with the opaque pointer. - class OpaqueUserData { - public: - using Deleter = std::function; + // Emplaces typed user data constructed from `args`. Execution context + // becomes the owner of the constructed object. + template + absl::Status Emplace(Args&&... args); + + // Looks up typed execution context data of type `T`. + template + absl::StatusOr Lookup() const { + TF_ASSIGN_OR_RETURN(auto user_data, LookupUserData(GetTypeId())); + return static_cast(user_data->data()); + } + + // Looks up opaque execution context data with given `type_id`. + absl::StatusOr Lookup(TypeId type_id) const { + TF_ASSIGN_OR_RETURN(auto user_data, LookupUserData(type_id)); + return user_data->data(); + } - OpaqueUserData(void* data, Deleter deleter); - ~OpaqueUserData(); + private: + // An RAII wrapper for opaque user data. Optional deleter will be called when + // UserData is destroyed together with the execution context. If deleter is + // nullptr then the caller is responsible for making sure that the pointer + // stays valid during the XLA execution and is correctly destroyed afterwards.
+ class UserData { + public: + UserData(void* data, Deleter deleter); + ~UserData(); - OpaqueUserData(OpaqueUserData&) = delete; - OpaqueUserData& operator=(const OpaqueUserData&) = delete; + UserData(UserData&) = delete; + UserData& operator=(const UserData&) = delete; void* data() const { return data_; } private: void* data_; - Deleter deleter_; + Deleter deleter_; }; - // Emplaces opaque user data keyed by `id`. - absl::Status Emplace(std::string id, void* data, - OpaqueUserData::Deleter deleter); - - // Looks up opaque user data keyed by `id`. - absl::StatusOr> Lookup( - std::string_view id) const; - - // Emplaces typed user data constructed from `args`. - template * = nullptr> - absl::Status Emplace(Args&&... args) { - return Insert(GetTypeId(), - std::make_shared(std::forward(args)...)); - } + static TypeId GetNextTypeId(); - // Looks up typed execution context data of type `T`. - template * = nullptr> - absl::StatusOr> Lookup() const { - auto user_data = Lookup(GetTypeId()); - if (!user_data.ok()) return user_data.status(); - return std::static_pointer_cast(*std::move(user_data)); - } - - private: - template * = nullptr> - static int64_t GetTypeId() { - static const char id = 0; - return reinterpret_cast(&id); + template + static TypeId GetTypeId() { + static const TypeId id = GetNextTypeId(); + return id; } - absl::Status Insert(int64_t type_id, std::shared_ptr data); - absl::StatusOr> Lookup(int64_t type_id) const; + absl::Status InsertUserData(TypeId type_id, std::unique_ptr data); + absl::StatusOr LookupUserData(TypeId type_id) const; - absl::flat_hash_map> typed_; - absl::flat_hash_map> opaque_; + absl::flat_hash_map> user_data_; }; +template +absl::Status ExecutionContext::Insert(T* data, Deleter deleter) { + return InsertUserData(GetTypeId(), + std::make_unique( + data, [deleter = std::move(deleter)](void* data) { + if (deleter) deleter(static_cast(data)); + })); +} + +template +absl::Status ExecutionContext::Emplace(Args&&... args) { + return InsertUserData(GetTypeId(), + std::make_unique( + new T(std::forward(args)...), + [](void* data) { delete static_cast(data); })); +} + } // namespace xla::ffi #endif // XLA_FFI_EXECUTION_CONTEXT_H_ diff --git a/third_party/xla/xla/ffi/execution_context_test.cc b/third_party/xla/xla/ffi/execution_context_test.cc index 159e529396b7e7..5c4cb3fdaab666 100644 --- a/third_party/xla/xla/ffi/execution_context_test.cc +++ b/third_party/xla/xla/ffi/execution_context_test.cc @@ -25,36 +25,52 @@ limitations under the License. 
namespace xla::ffi { -struct StringUserData { - std::string data; -}; - -struct I32UserData : public ExecutionContext::UserData { +struct I32UserData { explicit I32UserData(int32_t value) : value(value) {} int32_t value; }; -TEST(ExecutionContextTest, OpaqueUserData) { - StringUserData string_data = {"foo"}; - auto deleter = [](void*) {}; +struct StrUserData { + explicit StrUserData(std::string value) : value(value) {} + std::string value; +}; +TEST(ExecutionContextTest, EmplaceUserData) { ExecutionContext context; - TF_ASSERT_OK(context.Emplace("foo", &string_data, deleter)); + TF_ASSERT_OK(context.Emplace(42)); + TF_ASSERT_OK(context.Emplace("hello")); - TF_ASSERT_OK_AND_ASSIGN(auto opaque_data, context.Lookup("foo")); - ASSERT_NE(opaque_data, nullptr); + TF_ASSERT_OK_AND_ASSIGN(auto* i32_data, context.Lookup()); + TF_ASSERT_OK_AND_ASSIGN(auto* str_data, context.Lookup()); - StringUserData* user_data = static_cast(opaque_data->data()); - EXPECT_EQ(user_data, &string_data); + ASSERT_NE(i32_data, nullptr); + ASSERT_NE(str_data, nullptr); + ASSERT_EQ(i32_data->value, 42); + ASSERT_EQ(str_data->value, "hello"); } -TEST(ExecutionContextTest, UserData) { +TEST(ExecutionContextTest, InsertUserOwned) { + I32UserData user_data(42); + ExecutionContext context; - TF_ASSERT_OK(context.Emplace(42)); + TF_ASSERT_OK(context.Insert(&user_data)); - TF_ASSERT_OK_AND_ASSIGN(auto i32_data, context.Lookup()); - ASSERT_NE(i32_data, nullptr); - ASSERT_EQ(i32_data->value, 42); + TF_ASSERT_OK_AND_ASSIGN(auto* i32_data, context.Lookup()); + ASSERT_EQ(i32_data, &user_data); +} + +TEST(ExecutionContextTest, InsertUserOwnedWithTypeId) { + TF_ASSERT_OK_AND_ASSIGN( + ExecutionContext::TypeId type_id, + ExecutionContext::RegisterExternalTypeId("I32UserData")); + + I32UserData user_data(42); + + ExecutionContext context; + TF_ASSERT_OK(context.Insert(type_id, &user_data)); + + TF_ASSERT_OK_AND_ASSIGN(auto* i32_data, context.Lookup(type_id)); + ASSERT_EQ(i32_data, &user_data); } TEST(ExecutionContextTest, UserDataNotFound) { diff --git a/third_party/xla/xla/ffi/ffi.h b/third_party/xla/xla/ffi/ffi.h index 4620f8ef63ccc2..3100bc3a4fa3b7 100644 --- a/third_party/xla/xla/ffi/ffi.h +++ b/third_party/xla/xla/ffi/ffi.h @@ -331,7 +331,7 @@ struct UserData {}; template struct CtxDecoding> { - using Type = std::shared_ptr; + using Type = T*; static std::optional Decode(const XLA_FFI_Api* api, XLA_FFI_ExecutionContext* ctx, @@ -360,7 +360,7 @@ struct CtxDecoding> { template <> struct ResultEncoding { - static XLA_FFI_Error* Encode(const XLA_FFI_Api* api, Status status) { + static XLA_FFI_Error* Encode(const XLA_FFI_Api* api, absl::Status status) { return api->internal_api->XLA_FFI_INTERNAL_Error_Forward(&status); } }; diff --git a/third_party/xla/xla/ffi/ffi_api.cc b/third_party/xla/xla/ffi/ffi_api.cc index d9b3e3874c897d..02e6b69e4f9064 100644 --- a/third_party/xla/xla/ffi/ffi_api.cc +++ b/third_party/xla/xla/ffi/ffi_api.cc @@ -63,14 +63,15 @@ bool IsCommandBufferCompatible(XLA_FFI_Handler_Traits traits) { // WARNING: These functions defined in `call_frame.h` as we need to make them // available without having to depend on `ffi.h` header. 
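A compact sketch (illustrative, not part of the patch) of the reworked ExecutionContext ownership rules exercised by the tests above, assuming an XLA build environment: Emplace() constructs an object the context owns, Insert() stores a caller-owned pointer, and typed Lookup() resolves through the per-type id. The Counter struct and Demo() function are assumptions made for the example.

#include <cstdint>
#include <string>

#include "absl/status/status.h"
#include "xla/ffi/execution_context.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace example {

struct Counter {
  explicit Counter(int32_t value) : value(value) {}
  int32_t value;
};

absl::Status Demo() {
  xla::ffi::ExecutionContext context;

  // Context-owned: constructed in place and deleted together with `context`.
  TF_RETURN_IF_ERROR(context.Emplace<Counter>(42));

  // Caller-owned: only the pointer is stored; with no deleter the caller must
  // keep the object alive for the duration of the execution.
  static std::string note = "borrowed";
  TF_RETURN_IF_ERROR(context.Insert(&note));

  // Typed lookup resolves the entry registered for Counter's internal type id.
  TF_ASSIGN_OR_RETURN(Counter* counter, context.Lookup<Counter>());
  counter->value += 1;
  return absl::OkStatus();
}

}  // namespace example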
diff --git a/third_party/xla/xla/ffi/ffi_api.cc b/third_party/xla/xla/ffi/ffi_api.cc
index d9b3e3874c897d..02e6b69e4f9064 100644
--- a/third_party/xla/xla/ffi/ffi_api.cc
+++ b/third_party/xla/xla/ffi/ffi_api.cc
@@ -63,14 +63,15 @@ bool IsCommandBufferCompatible(XLA_FFI_Handler_Traits traits) {
 
 // WARNING: These functions defined in `call_frame.h` as we need to make them
 // available without having to depend on `ffi.h` header.
-Status TakeStatus(XLA_FFI_Error* error) {
+absl::Status TakeStatus(XLA_FFI_Error* error) {
   if (error == nullptr) return absl::OkStatus();
-  Status status = std::move(error->status);
+  absl::Status status = std::move(error->status);
   delete error;
   return status;
 }
 
-Status Call(Ffi& handler, CallFrame& call_frame, const CallOptions& options) {
+absl::Status Call(Ffi& handler, CallFrame& call_frame,
+                  const CallOptions& options) {
   XLA_FFI_ExecutionContext ctx = {
       options.run_options, options.called_computation,
       internal::ScopedExecutionContext::GetCallExecutionContext(options)};
@@ -78,8 +79,8 @@ Status Call(Ffi& handler, CallFrame& call_frame, const CallOptions& options) {
   return TakeStatus(handler.Call(&ffi_call_frame));
 }
 
-Status Call(XLA_FFI_Handler* handler, CallFrame& call_frame,
-            const CallOptions& options) {
+absl::Status Call(XLA_FFI_Handler* handler, CallFrame& call_frame,
+                  const CallOptions& options) {
   XLA_FFI_ExecutionContext ctx = {
       options.run_options, options.called_computation,
       internal::ScopedExecutionContext::GetCallExecutionContext(options)};
@@ -127,16 +128,17 @@ static HandlerRegistry& GetHandlerRegistry() {
   return *registry;
 }
 
-static Status RegisterHandler(std::string_view name, std::string_view platform,
-                              XLA_FFI_Handler* handler,
-                              XLA_FFI_Handler_Traits traits) {
+static absl::Status RegisterHandler(std::string_view name,
+                                    std::string_view platform,
+                                    XLA_FFI_Handler* handler,
+                                    XLA_FFI_Handler_Traits traits) {
   auto emplaced = GetHandlerRegistry().try_emplace(
       MakeHandlerKey(name, platform), HandlerRegistration{handler, traits});
   if (!emplaced.second)
     return absl::InvalidArgumentError(
         absl::StrCat("Duplicate FFI handler registration for ", name,
                      " on a platform ", platform));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 absl::StatusOr<HandlerRegistration> FindHandler(std::string_view name,
@@ -172,8 +174,8 @@ static std::string StructSizeErrorMsg(std::string_view struct_name,
                       XLA_FFI_API_MAJOR, ".", XLA_FFI_API_MINOR, ".");
 }
 
-static Status ActualStructSizeIsGreaterOrEqual(std::string_view struct_name,
-                                               size_t expected, size_t actual) {
+static absl::Status ActualStructSizeIsGreaterOrEqual(
+    std::string_view struct_name, size_t expected, size_t actual) {
   if (actual < expected) {
     return absl::InvalidArgumentError(
         StructSizeErrorMsg(struct_name, expected, actual));
@@ -225,7 +227,7 @@ static absl::StatusCode ToStatusCode(XLA_FFI_Error_Code errc) {
 
 #define XLA_FFI_RETURN_IF_ERROR(expr)                                   \
   do {                                                                  \
-    Status _status = (expr);                                            \
+    absl::Status _status = (expr);                                      \
     if (!_status.ok()) {                                                \
       XLA_FFI_Error* _c_status = new XLA_FFI_Error{std::move(_status)}; \
       return _c_status;                                                 \
@@ -241,7 +243,7 @@ static XLA_FFI_Error* XLA_FFI_Error_Create(XLA_FFI_Error_Create_Args* args) {
 }
 
 static void XLA_FFI_Error_GetMessage(XLA_FFI_Error_GetMessage_Args* args) {
-  Status struct_size_check = ActualStructSizeIsGreaterOrEqual(
+  absl::Status struct_size_check = ActualStructSizeIsGreaterOrEqual(
       "XLA_FFI_Error_GetMessage", XLA_FFI_Error_GetMessage_Args_STRUCT_SIZE,
       args->struct_size);
   if (!struct_size_check.ok()) {
@@ -253,7 +255,7 @@ static void XLA_FFI_Error_GetMessage(XLA_FFI_Error_GetMessage_Args* args) {
 }
 
 static void XLA_FFI_Error_Destroy(XLA_FFI_Error_Destroy_Args* args) {
-  Status struct_size_check = ActualStructSizeIsGreaterOrEqual(
+  absl::Status struct_size_check = ActualStructSizeIsGreaterOrEqual(
       "XLA_FFI_Error_Destroy", XLA_FFI_Error_Destroy_Args_STRUCT_SIZE,
       args->struct_size);
   if (!struct_size_check.ok()) {
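For context, the macro above is the pattern every C shim in this file uses to convert an `absl::Status` failure into a heap-allocated `XLA_FFI_Error*`. A hedged sketch with hypothetical `Validate` and `XLA_FFI_DoSomething` stand-ins:

    // Hypothetical helper returning absl::Status.
    static absl::Status Validate(size_t struct_size) {
      return struct_size != 0 ? absl::OkStatus()
                              : absl::InvalidArgumentError("zero-sized args struct");
    }

    // Hypothetical C shim following the pattern used throughout ffi_api.cc.
    static XLA_FFI_Error* XLA_FFI_DoSomething(size_t struct_size) {
      // On failure the macro moves the absl::Status into a heap-allocated
      // XLA_FFI_Error and returns it across the C API boundary.
      XLA_FFI_RETURN_IF_ERROR(Validate(struct_size));
      return nullptr;  // nullptr signals success.
    }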
@@ -289,21 +291,35 @@ static XLA_FFI_Error* XLA_FFI_Stream_Get(XLA_FFI_Stream_Get_Args* args) {
   return nullptr;
 }
 
+static XLA_FFI_Error* XLA_FFI_TypeId_Register(
+    XLA_FFI_TypeId_Register_Args* args) {
+  XLA_FFI_RETURN_IF_ERROR(ActualStructSizeIsGreaterOrEqual(
+      "XLA_FFI_ExecutionContext_Get_Args",
+      XLA_FFI_ExecutionContext_Get_Args_STRUCT_SIZE, args->struct_size));
+
+  auto type_id = ExecutionContext::RegisterExternalTypeId(
+      std::string_view(args->name.ptr, args->name.len));
+  if (!type_id.ok()) {
+    return new XLA_FFI_Error{std::move(type_id).status()};
+  }
+
+  args->type_id->type_id = type_id->value();
+  return nullptr;
+}
+
 static XLA_FFI_Error* XLA_FFI_ExecutionContext_Get(
     XLA_FFI_ExecutionContext_Get_Args* args) {
   XLA_FFI_RETURN_IF_ERROR(ActualStructSizeIsGreaterOrEqual(
       "XLA_FFI_ExecutionContext_Get_Args",
       XLA_FFI_ExecutionContext_Get_Args_STRUCT_SIZE, args->struct_size));
-  XLA_FFI_RETURN_IF_ERROR(ActualStructSizeIsGreaterOrEqual(
-      "XLA_FFI_ByteSpan", XLA_FFI_ByteSpan_STRUCT_SIZE, args->id.struct_size));
 
-  auto opaque = args->ctx->execution_context->Lookup(
-      std::string_view(args->id.ptr, args->id.len));
-  if (!opaque.ok()) {
-    return new XLA_FFI_Error{std::move(opaque).status()};
+  auto user_data = args->ctx->execution_context->Lookup(
+      ExecutionContext::TypeId(args->type_id->type_id));
+  if (!user_data.ok()) {
+    return new XLA_FFI_Error{std::move(user_data).status()};
   }
 
-  args->data = (*opaque)->data();
+  args->data = *user_data;
   return nullptr;
 }
 
@@ -360,12 +376,13 @@ static XLA_FFI_Api api = {
 
     &internal_api,
 
-    XLA_FFI_Error_Create,          // creates error
-    XLA_FFI_Error_GetMessage,      // get error message
-    XLA_FFI_Error_Destroy,         // frees error
-    XLA_FFI_Handler_Register,      // registers handler
-    XLA_FFI_Stream_Get,            // returns platform specific stream
-    XLA_FFI_ExecutionContext_Get,  // returns execution context data
+    XLA_FFI_Error_Create,
+    XLA_FFI_Error_GetMessage,
+    XLA_FFI_Error_Destroy,
+    XLA_FFI_Handler_Register,
+    XLA_FFI_Stream_Get,
+    XLA_FFI_TypeId_Register,
+    XLA_FFI_ExecutionContext_Get,
 };
 
 const XLA_FFI_Api* GetXlaFfiApi() { return &api; }
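The new `XLA_FFI_TypeId_Register` table entry lets code outside of XLA mint a `TypeId` by name before attaching user data. The sketch below is illustrative only: of the argument struct, just `struct_size`, `name.ptr`, `name.len`, and `type_id->type_id` are visible in this diff; the `XLA_FFI_TypeId_Register_Args_STRUCT_SIZE` constant, the api-table member name, and any other bookkeeping fields are assumptions patterned on the existing `XLA_FFI_*` entries.

    // Assumption-heavy sketch: register a TypeId for an externally defined type
    // through the api table, then read back the value written by the callee.
    static int64_t RegisterMyStateTypeId(const XLA_FFI_Api* api) {
      static constexpr char kName[] = "MyState";

      XLA_FFI_TypeId type_id;
      XLA_FFI_TypeId_Register_Args args;
      args.struct_size = XLA_FFI_TypeId_Register_Args_STRUCT_SIZE;  // assumed constant
      args.name.ptr = kName;                                        // visible in the diff
      args.name.len = sizeof(kName) - 1;
      args.type_id = &type_id;
      // Any additional bookkeeping fields of the structs are elided here.

      if (XLA_FFI_Error* error = api->XLA_FFI_TypeId_Register(&args)) {  // assumed member name
        // Real code would report and destroy `error`; elided in this sketch.
        return -1;
      }
      return type_id.type_id;  // populated by XLA_FFI_TypeId_Register
    }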
diff --git a/third_party/xla/xla/ffi/ffi_api.h b/third_party/xla/xla/ffi/ffi_api.h
index dc50916f8ab5b1..766fb83c83026c 100644
--- a/third_party/xla/xla/ffi/ffi_api.h
+++ b/third_party/xla/xla/ffi/ffi_api.h
@@ -50,13 +50,13 @@ struct CallOptions {
 
 // Takes ownership of the XLA FFI error and returns underlying status. Frees
 // `error` if it's not nullptr; returns OK status otherwise.
-Status TakeStatus(XLA_FFI_Error* error);
+absl::Status TakeStatus(XLA_FFI_Error* error);
 
-Status Call(Ffi& handler, CallFrame& call_frame,
-            const CallOptions& options = {});
+absl::Status Call(Ffi& handler, CallFrame& call_frame,
+                  const CallOptions& options = {});
 
-Status Call(XLA_FFI_Handler* handler, CallFrame& call_frame,
-            const CallOptions& options = {});
+absl::Status Call(XLA_FFI_Handler* handler, CallFrame& call_frame,
+                  const CallOptions& options = {});
 
 namespace internal {
 // This is an internal workaround to override FFI execution context for FFI
diff --git a/third_party/xla/xla/ffi/ffi_test.cc b/third_party/xla/xla/ffi/ffi_test.cc
index c95e7e3d28026b..f2c05eaf32313d 100644
--- a/third_party/xla/xla/ffi/ffi_test.cc
+++ b/third_party/xla/xla/ffi/ffi_test.cc
@@ -615,24 +615,24 @@ TEST(FfiTest, RunOptionsCtx) {
   TF_ASSERT_OK(status);
 }
 
-struct MyData : public ExecutionContext::UserData {
-  explicit MyData(std::string str) : str(std::move(str)) {}
+struct StrUserData {
+  explicit StrUserData(std::string str) : str(std::move(str)) {}
   std::string str;
 };
 
 TEST(FfiTest, UserData) {
   ExecutionContext execution_context;
-  TF_ASSERT_OK(execution_context.Emplace<MyData>("foo"));
+  TF_ASSERT_OK(execution_context.Emplace<StrUserData>("foo"));
 
   CallFrameBuilder builder;
   auto call_frame = builder.Build();
 
-  auto fn = [&](std::shared_ptr<MyData> data) {
+  auto fn = [&](StrUserData* data) {
     EXPECT_EQ(data->str, "foo");
     return absl::OkStatus();
   };
 
-  auto handler = Ffi::Bind().Ctx<UserData<MyData>>().To(fn);
+  auto handler = Ffi::Bind().Ctx<UserData<StrUserData>>().To(fn);
 
   ServiceExecutableRunOptions opts;
   opts.mutable_run_options()->set_ffi_execution_context(&execution_context);
diff --git a/third_party/xla/xla/frontend_attributes.cc b/third_party/xla/xla/frontend_attributes.cc
index 8831040f89c15c..53ee0a4d1f1643 100644
--- a/third_party/xla/xla/frontend_attributes.cc
+++ b/third_party/xla/xla/frontend_attributes.cc
@@ -14,6 +14,9 @@ limitations under the License.
 ==============================================================================*/
 #include "xla/frontend_attributes.h"
 
+#include "xla/hlo/ir/hlo_instruction.h"
+#include "xla/xla_data.pb.h"
+
 namespace xla {
 
 void SetDisjointReadWriteRegionsAttr(HloInstruction* instruction) {
diff --git a/third_party/xla/xla/hlo/evaluator/hlo_evaluator.cc b/third_party/xla/xla/hlo/evaluator/hlo_evaluator.cc
index 4ec41dc7d81b4a..9df1e652d24dd5 100644
--- a/third_party/xla/xla/hlo/evaluator/hlo_evaluator.cc
+++ b/third_party/xla/xla/hlo/evaluator/hlo_evaluator.cc
@@ -159,9 +159,10 @@ std::optional<bool> GetInstructionStaticValueAsBool(
 template <PrimitiveType kType>
 struct PopulateParallelImpl {
   using NativeT = NativeTypeOf<kType>;
-  static Status Run(Literal& literal,
-                    absl::FunctionRef<Literal(absl::Span<const int64_t>, int)>
-                        literal_generator) {
+  static absl::Status Run(
+      Literal& literal,
+      absl::FunctionRef<Literal(absl::Span<const int64_t>, int)>
+          literal_generator) {
     return literal.PopulateParallel<NativeT>(
         [&literal_generator](absl::Span<const int64_t> output_index,
                              int thread_id) {
@@ -174,7 +175,7 @@ struct PopulateParallelImpl {
 template <PrimitiveType kType>
 struct PopulateImpl {
   using NativeT = NativeTypeOf<kType>;
-  static Status Run(
+  static absl::Status Run(
       Literal& literal,
       absl::FunctionRef<Literal(absl::Span<const int64_t>)> literal_generator) {
     return literal.Populate<NativeT>(
@@ -192,10 +193,10 @@ struct PopulateImpl {
 // to small templated helpers just for the parts that require manipulating the
 // native types to avoid templating the whole implementations.
 template