diff --git a/c-deps/libroach/protos/roachpb/data.pb.cc b/c-deps/libroach/protos/roachpb/data.pb.cc
index 5b5bafbed337..617b818ca6fd 100644
--- a/c-deps/libroach/protos/roachpb/data.pb.cc
+++ b/c-deps/libroach/protos/roachpb/data.pb.cc
@@ -86,6 +86,11 @@ class AbortSpanEntryDefaultTypeInternal {
   ::google::protobuf::internal::ExplicitlyConstructed<AbortSpanEntry>
       _instance;
 } _AbortSpanEntry_default_instance_;
+class TxnCoordMetaDefaultTypeInternal {
+public:
+  ::google::protobuf::internal::ExplicitlyConstructed<TxnCoordMeta>
+      _instance;
+} _TxnCoordMeta_default_instance_;
 
 namespace protobuf_roachpb_2fdata_2eproto {
 
@@ -114,6 +119,7 @@ PROTOBUF_CONSTEXPR_VAR ::google::protobuf::internal::ParseTable const
   { NULL, NULL, 0, -1, -1, -1, -1, NULL, false },
   { NULL, NULL, 0, -1, -1, -1, -1, NULL, false },
   { NULL, NULL, 0, -1, -1, -1, -1, NULL, false },
+  { NULL, NULL, 0, -1, -1, -1, -1, NULL, false },
 };
 
 void TableStruct::InitDefaultsImpl() {
@@ -151,7 +157,9 @@ void TableStruct::InitDefaultsImpl() {
   ::google::protobuf::internal::OnShutdownDestroyMessage(
       &_Lease_default_instance_);_AbortSpanEntry_default_instance_._instance.DefaultConstruct();
   ::google::protobuf::internal::OnShutdownDestroyMessage(
-      &_AbortSpanEntry_default_instance_);_Value_default_instance_._instance.get_mutable()->timestamp_ = const_cast< ::cockroach::util::hlc::Timestamp*>(
+      &_AbortSpanEntry_default_instance_);_TxnCoordMeta_default_instance_._instance.DefaultConstruct();
+  ::google::protobuf::internal::OnShutdownDestroyMessage(
+      &_TxnCoordMeta_default_instance_);_Value_default_instance_._instance.get_mutable()->timestamp_ = const_cast< ::cockroach::util::hlc::Timestamp*>(
       ::cockroach::util::hlc::Timestamp::internal_default_instance());
   _KeyValue_default_instance_._instance.get_mutable()->value_ = const_cast< ::cockroach::roachpb::Value*>(
       ::cockroach::roachpb::Value::internal_default_instance());
@@ -203,6 +211,8 @@ void TableStruct::InitDefaultsImpl() {
       ::cockroach::util::hlc::Timestamp::internal_default_instance());
   _AbortSpanEntry_default_instance_._instance.get_mutable()->timestamp_ = const_cast< ::cockroach::util::hlc::Timestamp*>(
       ::cockroach::util::hlc::Timestamp::internal_default_instance());
+  _TxnCoordMeta_default_instance_._instance.get_mutable()->txn_ = const_cast< ::cockroach::roachpb::Transaction*>(
+      ::cockroach::roachpb::Transaction::internal_default_instance());
 }
 
 void InitDefaults() {
@@ -6039,6 +6049,357 @@ void AbortSpanEntry::set_priority(::google::protobuf::int32 value) {
 
 #endif  // PROTOBUF_INLINE_NOT_IN_HEADERS
 
+// ===================================================================
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1900
+const int TxnCoordMeta::kTxnFieldNumber;
+const int TxnCoordMeta::kIntentsFieldNumber;
+const int TxnCoordMeta::kCommandCountFieldNumber;
+#endif  // !defined(_MSC_VER) || _MSC_VER >= 1900
+
+TxnCoordMeta::TxnCoordMeta()
+  : ::google::protobuf::MessageLite(), _internal_metadata_(NULL) {
+  if (GOOGLE_PREDICT_TRUE(this != internal_default_instance())) {
+    protobuf_roachpb_2fdata_2eproto::InitDefaults();
+  }
+  SharedCtor();
+  // @@protoc_insertion_point(constructor:cockroach.roachpb.TxnCoordMeta)
+}
+TxnCoordMeta::TxnCoordMeta(const TxnCoordMeta& from)
+  : ::google::protobuf::MessageLite(),
+      _internal_metadata_(NULL),
+      intents_(from.intents_),
+      _cached_size_(0) {
+  _internal_metadata_.MergeFrom(from._internal_metadata_);
+  if (from.has_txn()) {
+    txn_ = new ::cockroach::roachpb::Transaction(*from.txn_);
+  } else {
+    txn_ = NULL;
+  }
+  command_count_ = from.command_count_;
+  // @@protoc_insertion_point(copy_constructor:cockroach.roachpb.TxnCoordMeta)
+}
+
+void TxnCoordMeta::SharedCtor() {
+  ::memset(&txn_, 0, static_cast<size_t>(
+      reinterpret_cast<char*>(&command_count_) -
+      reinterpret_cast<char*>(&txn_)) + sizeof(command_count_));
+  _cached_size_ = 0;
+}
+
+TxnCoordMeta::~TxnCoordMeta() {
+  // @@protoc_insertion_point(destructor:cockroach.roachpb.TxnCoordMeta)
+  SharedDtor();
+}
+
+void TxnCoordMeta::SharedDtor() {
+  if (this != internal_default_instance()) delete txn_;
+}
+
+void TxnCoordMeta::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const TxnCoordMeta& TxnCoordMeta::default_instance() {
+  protobuf_roachpb_2fdata_2eproto::InitDefaults();
+  return *internal_default_instance();
+}
+
+TxnCoordMeta* TxnCoordMeta::New(::google::protobuf::Arena* arena) const {
+  TxnCoordMeta* n = new TxnCoordMeta;
+  if (arena != NULL) {
+    arena->Own(n);
+  }
+  return n;
+}
+
+void TxnCoordMeta::Clear() {
+// @@protoc_insertion_point(message_clear_start:cockroach.roachpb.TxnCoordMeta)
+  ::google::protobuf::uint32 cached_has_bits = 0;
+  // Prevent compiler warnings about cached_has_bits being unused
+  (void) cached_has_bits;
+
+  intents_.Clear();
+  if (GetArenaNoVirtual() == NULL && txn_ != NULL) {
+    delete txn_;
+  }
+  txn_ = NULL;
+  command_count_ = 0;
+  _internal_metadata_.Clear();
+}
+
+bool TxnCoordMeta::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
+  ::google::protobuf::uint32 tag;
+  ::google::protobuf::io::LazyStringOutputStream unknown_fields_string(
+      ::google::protobuf::NewPermanentCallback(&_internal_metadata_,
+          &::google::protobuf::internal::InternalMetadataWithArenaLite::
+              mutable_unknown_fields));
+  ::google::protobuf::io::CodedOutputStream unknown_fields_stream(
+      &unknown_fields_string, false);
+  // @@protoc_insertion_point(parse_start:cockroach.roachpb.TxnCoordMeta)
+  for (;;) {
+    ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
+    tag = p.first;
+    if (!p.second) goto handle_unusual;
+    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+      case 1: {
+        if (static_cast< ::google::protobuf::uint8>(tag) ==
+            static_cast< ::google::protobuf::uint8>(10u /* 10 & 0xFF */)) {
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+                input, mutable_txn()));
+        } else {
+          goto handle_unusual;
+        }
+        break;
+      }
+
+      case 2: {
+        if (static_cast< ::google::protobuf::uint8>(tag) ==
+            static_cast< ::google::protobuf::uint8>(18u /* 18 & 0xFF */)) {
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+                input, add_intents()));
+        } else {
+          goto handle_unusual;
+        }
+        break;
+      }
+
+      // int32 command_count = 3;
+      case 3: {
+        if (static_cast< ::google::protobuf::uint8>(tag) ==
+            static_cast< ::google::protobuf::uint8>(24u /* 24 & 0xFF */)) {
+
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+                 input, &command_count_)));
+        } else {
+          goto handle_unusual;
+        }
+        break;
+      }
+
+      default: {
+      handle_unusual:
+        if (tag == 0) {
+          goto success;
+        }
+        DO_(::google::protobuf::internal::WireFormatLite::SkipField(
+            input, tag, &unknown_fields_stream));
+        break;
+      }
+    }
+  }
+success:
+  // @@protoc_insertion_point(parse_success:cockroach.roachpb.TxnCoordMeta)
+  return true;
+failure:
+  // @@protoc_insertion_point(parse_failure:cockroach.roachpb.TxnCoordMeta)
+  return false;
+#undef DO_
+}
+
+void TxnCoordMeta::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  // @@protoc_insertion_point(serialize_start:cockroach.roachpb.TxnCoordMeta)
+  ::google::protobuf::uint32 cached_has_bits = 0;
+  (void) cached_has_bits;
+
+  if (this->has_txn()) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessage(
+      1, *this->txn_, output);
+  }
+
+  for (unsigned int i = 0,
+      n = static_cast<unsigned int>(this->intents_size()); i < n; i++) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessage(
+      2, this->intents(static_cast<int>(i)), output);
+  }
+
+  // int32 command_count = 3;
+  if (this->command_count() != 0) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt32(3, this->command_count(), output);
+  }
+
+  output->WriteRaw((::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? _internal_metadata_.unknown_fields() : _internal_metadata_.default_instance()).data(),
+                   static_cast<int>((::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? _internal_metadata_.unknown_fields() : _internal_metadata_.default_instance()).size()));
+  // @@protoc_insertion_point(serialize_end:cockroach.roachpb.TxnCoordMeta)
+}
+
+size_t TxnCoordMeta::ByteSizeLong() const {
+// @@protoc_insertion_point(message_byte_size_start:cockroach.roachpb.TxnCoordMeta)
+  size_t total_size = 0;
+
+  total_size += (::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? _internal_metadata_.unknown_fields() : _internal_metadata_.default_instance()).size();
+
+  {
+    unsigned int count = static_cast<unsigned int>(this->intents_size());
+    total_size += 1UL * count;
+    for (unsigned int i = 0; i < count; i++) {
+      total_size +=
+        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+          this->intents(static_cast<int>(i)));
+    }
+  }
+
+  if (this->has_txn()) {
+    total_size += 1 +
+      ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+        *this->txn_);
+  }
+
+  // int32 command_count = 3;
+  if (this->command_count() != 0) {
+    total_size += 1 +
+      ::google::protobuf::internal::WireFormatLite::Int32Size(
+        this->command_count());
+  }
+
+  int cached_size = ::google::protobuf::internal::ToCachedSize(total_size);
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = cached_size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+  return total_size;
+}
+
+void TxnCoordMeta::CheckTypeAndMergeFrom(
+    const ::google::protobuf::MessageLite& from) {
+  MergeFrom(*::google::protobuf::down_cast<const TxnCoordMeta*>(&from));
+}
+
+void TxnCoordMeta::MergeFrom(const TxnCoordMeta& from) {
+// @@protoc_insertion_point(class_specific_merge_from_start:cockroach.roachpb.TxnCoordMeta)
+  GOOGLE_DCHECK_NE(&from, this);
+  _internal_metadata_.MergeFrom(from._internal_metadata_);
+  ::google::protobuf::uint32 cached_has_bits = 0;
+  (void) cached_has_bits;
+
+  intents_.MergeFrom(from.intents_);
+  if (from.has_txn()) {
+    mutable_txn()->::cockroach::roachpb::Transaction::MergeFrom(from.txn());
+  }
+  if (from.command_count() != 0) {
+    set_command_count(from.command_count());
+  }
+}
+
+void TxnCoordMeta::CopyFrom(const TxnCoordMeta& from) {
+// @@protoc_insertion_point(class_specific_copy_from_start:cockroach.roachpb.TxnCoordMeta)
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+bool TxnCoordMeta::IsInitialized() const {
+  return true;
+}
+
+void TxnCoordMeta::Swap(TxnCoordMeta* other) {
+  if (other == this) return;
+  InternalSwap(other);
+}
+void TxnCoordMeta::InternalSwap(TxnCoordMeta* other) {
+  using std::swap;
+  intents_.InternalSwap(&other->intents_);
+  swap(txn_, other->txn_);
+  swap(command_count_, other->command_count_);
+  _internal_metadata_.Swap(&other->_internal_metadata_);
+  swap(_cached_size_, other->_cached_size_);
+}
+
+::std::string TxnCoordMeta::GetTypeName() const {
+  return "cockroach.roachpb.TxnCoordMeta";
+}
+
+#if PROTOBUF_INLINE_NOT_IN_HEADERS
+// TxnCoordMeta
+
+bool TxnCoordMeta::has_txn() const {
+  return this != internal_default_instance() && txn_ != NULL;
+}
+void TxnCoordMeta::clear_txn() {
+  if (GetArenaNoVirtual() == NULL && txn_ != NULL) delete txn_;
+  txn_ = NULL;
+}
+const ::cockroach::roachpb::Transaction& TxnCoordMeta::txn() const {
+  const ::cockroach::roachpb::Transaction* p = txn_;
+  // @@protoc_insertion_point(field_get:cockroach.roachpb.TxnCoordMeta.txn)
+  return p != NULL ? *p : *reinterpret_cast<const ::cockroach::roachpb::Transaction*>(
+      &::cockroach::roachpb::_Transaction_default_instance_);
+}
+::cockroach::roachpb::Transaction* TxnCoordMeta::mutable_txn() {
+
+  if (txn_ == NULL) {
+    txn_ = new ::cockroach::roachpb::Transaction;
+  }
+  // @@protoc_insertion_point(field_mutable:cockroach.roachpb.TxnCoordMeta.txn)
+  return txn_;
+}
+::cockroach::roachpb::Transaction* TxnCoordMeta::release_txn() {
+  // @@protoc_insertion_point(field_release:cockroach.roachpb.TxnCoordMeta.txn)
+
+  ::cockroach::roachpb::Transaction* temp = txn_;
+  txn_ = NULL;
+  return temp;
+}
+void TxnCoordMeta::set_allocated_txn(::cockroach::roachpb::Transaction* txn) {
+  delete txn_;
+  txn_ = txn;
+  if (txn) {
+
+  } else {
+
+  }
+  // @@protoc_insertion_point(field_set_allocated:cockroach.roachpb.TxnCoordMeta.txn)
+}
+
+int TxnCoordMeta::intents_size() const {
+  return intents_.size();
+}
+void TxnCoordMeta::clear_intents() {
+  intents_.Clear();
+}
+const ::cockroach::roachpb::Span& TxnCoordMeta::intents(int index) const {
+  // @@protoc_insertion_point(field_get:cockroach.roachpb.TxnCoordMeta.intents)
+  return intents_.Get(index);
+}
+::cockroach::roachpb::Span* TxnCoordMeta::mutable_intents(int index) {
+  // @@protoc_insertion_point(field_mutable:cockroach.roachpb.TxnCoordMeta.intents)
+  return intents_.Mutable(index);
+}
+::cockroach::roachpb::Span* TxnCoordMeta::add_intents() {
+  // @@protoc_insertion_point(field_add:cockroach.roachpb.TxnCoordMeta.intents)
+  return intents_.Add();
+}
+::google::protobuf::RepeatedPtrField< ::cockroach::roachpb::Span >*
+TxnCoordMeta::mutable_intents() {
+  // @@protoc_insertion_point(field_mutable_list:cockroach.roachpb.TxnCoordMeta.intents)
+  return &intents_;
+}
+const ::google::protobuf::RepeatedPtrField< ::cockroach::roachpb::Span >&
+TxnCoordMeta::intents() const {
+  // @@protoc_insertion_point(field_list:cockroach.roachpb.TxnCoordMeta.intents)
+  return intents_;
+}
+
+// int32 command_count = 3;
+void TxnCoordMeta::clear_command_count() {
+  command_count_ = 0;
+}
+::google::protobuf::int32 TxnCoordMeta::command_count() const {
+  // @@protoc_insertion_point(field_get:cockroach.roachpb.TxnCoordMeta.command_count)
+  return command_count_;
+}
+void TxnCoordMeta::set_command_count(::google::protobuf::int32 value) {
+
+  command_count_ = value;
+  // @@protoc_insertion_point(field_set:cockroach.roachpb.TxnCoordMeta.command_count)
+}
+
+#endif  // PROTOBUF_INLINE_NOT_IN_HEADERS
+
 // @@protoc_insertion_point(namespace_scope)
 
 }  // namespace roachpb
diff --git a/c-deps/libroach/protos/roachpb/data.pb.h b/c-deps/libroach/protos/roachpb/data.pb.h
index 054cc86d7f9c..ae8c3c7b3830 100644
--- a/c-deps/libroach/protos/roachpb/data.pb.h
+++ b/c-deps/libroach/protos/roachpb/data.pb.h
@@ -74,6 +74,9 @@ extern StoreIdentDefaultTypeInternal _StoreIdent_default_instance_;
 class Transaction;
 class TransactionDefaultTypeInternal;
 extern TransactionDefaultTypeInternal _Transaction_default_instance_;
+class TxnCoordMeta;
+class TxnCoordMetaDefaultTypeInternal;
+extern TxnCoordMetaDefaultTypeInternal _TxnCoordMeta_default_instance_;
 class Value;
 class ValueDefaultTypeInternal;
 extern ValueDefaultTypeInternal _Value_default_instance_;
@@ -1842,6 +1845,122 @@ class AbortSpanEntry : public ::google::protobuf::MessageLite /* @@protoc_insert
   mutable int _cached_size_;
   friend struct protobuf_roachpb_2fdata_2eproto::TableStruct;
 };
+// -------------------------------------------------------------------
+
+class TxnCoordMeta : public ::google::protobuf::MessageLite /* @@protoc_insertion_point(class_definition:cockroach.roachpb.TxnCoordMeta) */ {
+ public:
+  TxnCoordMeta();
+  virtual ~TxnCoordMeta();
+
+  TxnCoordMeta(const TxnCoordMeta& from);
+
+  inline TxnCoordMeta& operator=(const TxnCoordMeta& from) {
+    CopyFrom(from);
+    return *this;
+  }
+  #if LANG_CXX11
+  TxnCoordMeta(TxnCoordMeta&& from) noexcept
+    : TxnCoordMeta() {
+    *this = ::std::move(from);
+  }
+
+  inline TxnCoordMeta& operator=(TxnCoordMeta&& from) noexcept {
+    if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
+      if (this != &from) InternalSwap(&from);
+    } else {
+      CopyFrom(from);
+    }
+    return *this;
+  }
+  #endif
+  static const TxnCoordMeta& default_instance();
+
+  static inline const TxnCoordMeta* internal_default_instance() {
+    return reinterpret_cast<const TxnCoordMeta*>(
+               &_TxnCoordMeta_default_instance_);
+  }
+  static PROTOBUF_CONSTEXPR int const kIndexInFileMessages =
+    14;
+
+  void Swap(TxnCoordMeta* other);
+  friend void swap(TxnCoordMeta& a, TxnCoordMeta& b) {
+    a.Swap(&b);
+  }
+
+  // implements Message ----------------------------------------------
+
+  inline TxnCoordMeta* New() const PROTOBUF_FINAL { return New(NULL); }
+
+  TxnCoordMeta* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
+  void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
+    PROTOBUF_FINAL;
+  void CopyFrom(const TxnCoordMeta& from);
+  void MergeFrom(const TxnCoordMeta& from);
+  void Clear() PROTOBUF_FINAL;
+  bool IsInitialized() const PROTOBUF_FINAL;
+
+  size_t ByteSizeLong() const PROTOBUF_FINAL;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
+  void DiscardUnknownFields();
+  int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  void InternalSwap(TxnCoordMeta* other);
+  private:
+  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
+    return NULL;
+  }
+  inline void* MaybeArenaPtr() const {
+    return NULL;
+  }
+  public:
+
+  ::std::string GetTypeName() const PROTOBUF_FINAL;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  int intents_size() const;
+  void clear_intents();
+  static const int kIntentsFieldNumber = 2;
+  const ::cockroach::roachpb::Span& intents(int index) const;
+  ::cockroach::roachpb::Span* mutable_intents(int index);
+  ::cockroach::roachpb::Span* add_intents();
+  ::google::protobuf::RepeatedPtrField< ::cockroach::roachpb::Span >*
+      mutable_intents();
+  const ::google::protobuf::RepeatedPtrField< ::cockroach::roachpb::Span >&
+      intents() const;
+
+  bool has_txn() const;
+  void clear_txn();
+  static const int kTxnFieldNumber = 1;
+  const ::cockroach::roachpb::Transaction& txn() const;
+  ::cockroach::roachpb::Transaction* mutable_txn();
+  ::cockroach::roachpb::Transaction* release_txn();
+  void set_allocated_txn(::cockroach::roachpb::Transaction* txn);
+
+  // int32 command_count = 3;
+  void clear_command_count();
+  static const int kCommandCountFieldNumber = 3;
+  ::google::protobuf::int32 command_count() const;
+  void set_command_count(::google::protobuf::int32 value);
+
+  // @@protoc_insertion_point(class_scope:cockroach.roachpb.TxnCoordMeta)
+ private:
+
+  ::google::protobuf::internal::InternalMetadataWithArenaLite _internal_metadata_;
+  ::google::protobuf::RepeatedPtrField< ::cockroach::roachpb::Span > intents_;
+  ::cockroach::roachpb::Transaction* txn_;
+  ::google::protobuf::int32 command_count_;
+  mutable int _cached_size_;
+  friend struct protobuf_roachpb_2fdata_2eproto::TableStruct;
+};
 
 // ===================================================================
 
@@ -3569,6 +3688,92 @@ inline void AbortSpanEntry::set_priority(::google::protobuf::int32 value) {
   // @@protoc_insertion_point(field_set:cockroach.roachpb.AbortSpanEntry.priority)
 }
 
+// -------------------------------------------------------------------
+
+// TxnCoordMeta
+
+inline bool TxnCoordMeta::has_txn() const {
+  return this != internal_default_instance() && txn_ != NULL;
+}
+inline void TxnCoordMeta::clear_txn() {
+  if (GetArenaNoVirtual() == NULL && txn_ != NULL) delete txn_;
+  txn_ = NULL;
+}
+inline const ::cockroach::roachpb::Transaction& TxnCoordMeta::txn() const {
+  const ::cockroach::roachpb::Transaction* p = txn_;
+  // @@protoc_insertion_point(field_get:cockroach.roachpb.TxnCoordMeta.txn)
+  return p != NULL ? *p : *reinterpret_cast<const ::cockroach::roachpb::Transaction*>(
+      &::cockroach::roachpb::_Transaction_default_instance_);
+}
+inline ::cockroach::roachpb::Transaction* TxnCoordMeta::mutable_txn() {
+
+  if (txn_ == NULL) {
+    txn_ = new ::cockroach::roachpb::Transaction;
+  }
+  // @@protoc_insertion_point(field_mutable:cockroach.roachpb.TxnCoordMeta.txn)
+  return txn_;
+}
+inline ::cockroach::roachpb::Transaction* TxnCoordMeta::release_txn() {
+  // @@protoc_insertion_point(field_release:cockroach.roachpb.TxnCoordMeta.txn)
+
+  ::cockroach::roachpb::Transaction* temp = txn_;
+  txn_ = NULL;
+  return temp;
+}
+inline void TxnCoordMeta::set_allocated_txn(::cockroach::roachpb::Transaction* txn) {
+  delete txn_;
+  txn_ = txn;
+  if (txn) {
+
+  } else {
+
+  }
+  // @@protoc_insertion_point(field_set_allocated:cockroach.roachpb.TxnCoordMeta.txn)
+}
+
+inline int TxnCoordMeta::intents_size() const {
+  return intents_.size();
+}
+inline void TxnCoordMeta::clear_intents() {
+  intents_.Clear();
+}
+inline const ::cockroach::roachpb::Span& TxnCoordMeta::intents(int index) const {
+  // @@protoc_insertion_point(field_get:cockroach.roachpb.TxnCoordMeta.intents)
+  return intents_.Get(index);
+}
+inline ::cockroach::roachpb::Span* TxnCoordMeta::mutable_intents(int index) {
+  // @@protoc_insertion_point(field_mutable:cockroach.roachpb.TxnCoordMeta.intents)
+  return intents_.Mutable(index);
+}
+inline ::cockroach::roachpb::Span* TxnCoordMeta::add_intents() {
+  // @@protoc_insertion_point(field_add:cockroach.roachpb.TxnCoordMeta.intents)
+  return intents_.Add();
+}
+inline ::google::protobuf::RepeatedPtrField< ::cockroach::roachpb::Span >*
+TxnCoordMeta::mutable_intents() {
+  // @@protoc_insertion_point(field_mutable_list:cockroach.roachpb.TxnCoordMeta.intents)
+  return &intents_;
+}
+inline const ::google::protobuf::RepeatedPtrField<
::cockroach::roachpb::Span >& +TxnCoordMeta::intents() const { + // @@protoc_insertion_point(field_list:cockroach.roachpb.TxnCoordMeta.intents) + return intents_; +} + +// int32 command_count = 3; +inline void TxnCoordMeta::clear_command_count() { + command_count_ = 0; +} +inline ::google::protobuf::int32 TxnCoordMeta::command_count() const { + // @@protoc_insertion_point(field_get:cockroach.roachpb.TxnCoordMeta.command_count) + return command_count_; +} +inline void TxnCoordMeta::set_command_count(::google::protobuf::int32 value) { + + command_count_ = value; + // @@protoc_insertion_point(field_set:cockroach.roachpb.TxnCoordMeta.command_count) +} + #ifdef __GNUC__ #pragma GCC diagnostic pop #endif // __GNUC__ @@ -3599,6 +3804,8 @@ inline void AbortSpanEntry::set_priority(::google::protobuf::int32 value) { // ------------------------------------------------------------------- +// ------------------------------------------------------------------- + // @@protoc_insertion_point(namespace_scope) diff --git a/pkg/ccl/sqlccl/backup.go b/pkg/ccl/sqlccl/backup.go index 6d9abaaaf1c0..d442da2102cf 100644 --- a/pkg/ccl/sqlccl/backup.go +++ b/pkg/ccl/sqlccl/backup.go @@ -349,7 +349,7 @@ func resolveTargetsToDescriptors( // TODO(andrei): Plumb a gatewayNodeID in here and also find a way to // express that whatever this txn does should not count towards lease // placement stats. - txn := client.NewTxn(db, 0 /* gatewayNodeID */) + txn := client.NewTxn(db, 0 /* gatewayNodeID */, client.RootTxn) opt := client.TxnExecOptions{AutoRetry: true, AutoCommit: true} err := txn.Exec(ctx, opt, func(ctx context.Context, txn *client.Txn, opt *client.TxnExecOptions) error { var err error diff --git a/pkg/ccl/storageccl/bench_test.go b/pkg/ccl/storageccl/bench_test.go index d61b26a3d4c8..76b42380e739 100644 --- a/pkg/ccl/storageccl/bench_test.go +++ b/pkg/ccl/storageccl/bench_test.go @@ -45,7 +45,7 @@ func BenchmarkAddSSTable(b *testing.B) { ctx := context.Background() tc := testcluster.StartTestCluster(b, 3, base.TestClusterArgs{}) defer tc.Stopper().Stop(ctx) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() id := sqlbase.ID(keys.MaxReservedDescID + 1) @@ -103,7 +103,7 @@ func BenchmarkWriteBatch(b *testing.B) { ctx := context.Background() tc := testcluster.StartTestCluster(b, 3, base.TestClusterArgs{}) defer tc.Stopper().Stop(ctx) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() id := sqlbase.ID(keys.MaxReservedDescID + 1) var batch engine.RocksDBBatchBuilder @@ -159,7 +159,7 @@ func BenchmarkImport(b *testing.B) { ctx := context.Background() tc := testcluster.StartTestCluster(b, 3, args) defer tc.Stopper().Stop(ctx) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() id := sqlbase.ID(keys.MaxReservedDescID + 1) diff --git a/pkg/ccl/storageccl/export_test.go b/pkg/ccl/storageccl/export_test.go index 00cd8daf31c4..97f070efb80c 100644 --- a/pkg/ccl/storageccl/export_test.go +++ b/pkg/ccl/storageccl/export_test.go @@ -36,7 +36,7 @@ func TestExportCmd(t *testing.T) { defer dirCleanupFn() tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: dir}}) defer tc.Stopper().Stop(ctx) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() exportAndSlurpOne := func( t *testing.T, start hlc.Timestamp, mvccFilter roachpb.MVCCFilter, @@ -179,7 +179,7 @@ func TestExportGCThreshold(t *testing.T) { ctx := context.Background() tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{}) 
defer tc.Stopper().Stop(ctx) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() req := &roachpb.ExportRequest{ Span: roachpb.Span{Key: keys.UserTableDataMin, EndKey: keys.MaxKey}, diff --git a/pkg/internal/client/client_test.go b/pkg/internal/client/client_test.go index d57a3872e23d..30c4d64946a1 100644 --- a/pkg/internal/client/client_test.go +++ b/pkg/internal/client/client_test.go @@ -42,6 +42,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/syncutil" ) @@ -84,48 +85,8 @@ func checkKVs(t *testing.T, kvs []client.KeyValue, expected ...interface{}) { } } -// notifyingSender is a sender which can set up a notification channel -// (on call to reset()) for clients which need to wait on a command -// being sent. -type notifyingSender struct { - notify chan struct{} - wrapped client.Sender -} - -func (ss *notifyingSender) reset(notify chan struct{}) { - ss.notify = notify -} - -func (ss *notifyingSender) Send( - ctx context.Context, ba roachpb.BatchRequest, -) (*roachpb.BatchResponse, *roachpb.Error) { - br, pErr := ss.wrapped.Send(ctx, ba) - if br != nil && br.Error != nil { - panic(roachpb.ErrorUnexpectedlySet(ss.wrapped, br)) - } - - select { - case ss.notify <- struct{}{}: - default: - } - - return br, pErr -} - func createTestClient(t *testing.T, s serverutils.TestServerInterface) *client.DB { - return s.KVClient().(*client.DB) -} - -// createTestNotifyClient creates a new client which connects using an HTTP -// sender to the server at addr. It contains a waitgroup to allow waiting. -func createTestNotifyClient( - t *testing.T, s serverutils.TestServerInterface, priority roachpb.UserPriority, -) (*client.DB, *notifyingSender) { - db := createTestClient(t, s) - sender := ¬ifyingSender{wrapped: db.GetSender()} - dbCtx := client.DefaultDBContext() - dbCtx.UserPriority = priority - return client.NewDBWithContext(sender, s.Clock(), dbCtx), sender + return s.DB() } // TestClientRetryNonTxn verifies that non-transactional client will @@ -187,7 +148,7 @@ func TestClientRetryNonTxn(t *testing.T) { // succeeds iff the test dictates that it does. for i, test := range testCases { key := roachpb.Key(fmt.Sprintf("key-%d", i)) - db, sender := createTestNotifyClient(t, s, 1) + db := createTestClient(t, s) // doneCall signals when the non-txn read or write has completed. doneCall := make(chan error) @@ -213,13 +174,10 @@ func TestClientRetryNonTxn(t *testing.T) { if count == 1 { nonTxnCtx := context.TODO() - // We use a "notifying" sender here, which allows us to know exactly when the - // call has been processed; otherwise, we'd be dependent on timing. - // The channel lets us pause txn until after the non-txn method has run once. - // Use a channel length of size 1 to guarantee a notification through a - // non-blocking send. + // The channel lets us pause txn until after the non-txn + // method has run once. Use a channel length of size 1 to + // guarantee a notification through a non-blocking send. notify := make(chan struct{}, 1) - sender.reset(notify) // We must try the non-txn put or get in a goroutine because // it might have to retry and will only succeed immediately in // the event we can push. 
@@ -230,6 +188,10 @@ func TestClientRetryNonTxn(t *testing.T) { } else { err = db.Put(nonTxnCtx, key, "value") } + notify <- struct{}{} + if err != nil { + log.Errorf(context.TODO(), "error on non-txn request: %s", err) + } doneCall <- errors.Wrapf( err, "%d: expected success on non-txn call to %s", i, test.args.Method()) @@ -828,14 +790,16 @@ func TestInconsistentReads(t *testing.T) { // Mock out DistSender's sender function to check the read consistency for // outgoing BatchRequests and return an empty reply. - var senderFn client.SenderFunc = func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { - if ba.ReadConsistency != roachpb.INCONSISTENT { - return nil, roachpb.NewErrorf("BatchRequest has unexpected ReadConsistency %s", ba.ReadConsistency) - } - return ba.CreateReply(), nil - } + factory := client.TxnSenderFactoryFunc(func(_ client.TxnType) client.TxnSender { + return client.TxnSenderFunc(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + if ba.ReadConsistency != roachpb.INCONSISTENT { + return nil, roachpb.NewErrorf("BatchRequest has unexpected ReadConsistency %s", ba.ReadConsistency) + } + return ba.CreateReply(), nil + }) + }) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - db := client.NewDB(senderFn, clock) + db := client.NewDB(factory, clock) ctx := context.TODO() prepInconsistent := func() *client.Batch { @@ -888,7 +852,7 @@ func TestReadOnlyTxnObeysDeadline(t *testing.T) { t.Fatal(err) } - txn := client.NewTxn(db, 0 /* gatewayNodeID */) + txn := client.NewTxn(db, 0 /* gatewayNodeID */, client.RootTxn) // Only snapshot transactions can observe deadline errors; serializable ones // get a restart error before the deadline check. if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil { diff --git a/pkg/internal/client/db.go b/pkg/internal/client/db.go index 9cb2560f9101..0bfa8eeeb477 100644 --- a/pkg/internal/client/db.go +++ b/pkg/internal/client/db.go @@ -182,27 +182,29 @@ func DefaultDBContext() DBContext { // DB is a database handle to a single cockroach cluster. A DB is safe for // concurrent use by multiple goroutines. type DB struct { - sender Sender - clock *hlc.Clock - ctx DBContext + factory TxnSenderFactory + clock *hlc.Clock + ctx DBContext } -// GetSender returns the underlying Sender. Only exported for tests. +// GetSender returns a transaction-capable Sender instance. The same +// sender must be used for the entirety of a transaction. Get a new +// instance to start a new transaction. func (db *DB) GetSender() Sender { - return db.sender + return db.factory.New(RootTxn) } // NewDB returns a new DB. -func NewDB(sender Sender, clock *hlc.Clock) *DB { - return NewDBWithContext(sender, clock, DefaultDBContext()) +func NewDB(factory TxnSenderFactory, clock *hlc.Clock) *DB { + return NewDBWithContext(factory, clock, DefaultDBContext()) } // NewDBWithContext returns a new DB with the given parameters. -func NewDBWithContext(sender Sender, clock *hlc.Clock, ctx DBContext) *DB { +func NewDBWithContext(factory TxnSenderFactory, clock *hlc.Clock, ctx DBContext) *DB { return &DB{ - sender: sender, - clock: clock, - ctx: ctx, + factory: factory, + clock: clock, + ctx: ctx, } } @@ -491,14 +493,14 @@ func (db *DB) Txn(ctx context.Context, retryable func(context.Context, *Txn) err // TODO(radu): we should open a tracing Span here (we need to figure out how // to use the correct tracer). 
- // TODO(andrei): revisit this when TxnCoordSender is moved to the client + // TODO(andrei): revisit this when TxnSender is moved to the client // (https://github.com/cockroachdb/cockroach/issues/10511). ctx, cancel := context.WithCancel(ctx) defer cancel() // TODO(andrei): Plumb a gatewayNodeID here. If we pass 0, then the gateway // field will be filled in by the DistSender which will assume that the // current node is the gateway. - txn := NewTxn(db, 0 /* gatewayNodeID */) + txn := NewTxn(db, 0 /* gatewayNodeID */, RootTxn) txn.SetDebugName("unnamed") opts := TxnExecOptions{ AutoCommit: true, @@ -523,6 +525,13 @@ func (db *DB) Txn(ctx context.Context, retryable func(context.Context, *Txn) err // any errors. Returns (nil, nil) for an empty batch. func (db *DB) send( ctx context.Context, ba roachpb.BatchRequest, +) (*roachpb.BatchResponse, *roachpb.Error) { + return db.sendUsingSender(ctx, ba, db.GetSender()) +} + +// sendUsingSender uses the specified sender to send the batch request. +func (db *DB) sendUsingSender( + ctx context.Context, ba roachpb.BatchRequest, sender Sender, ) (*roachpb.BatchResponse, *roachpb.Error) { if len(ba.Requests) == 0 { return nil, nil @@ -543,7 +552,7 @@ func (db *DB) send( } tracing.AnnotateTrace() - br, pErr := db.sender.Send(ctx, ba) + br, pErr := sender.Send(ctx, ba) if pErr != nil { if log.V(1) { log.Infof(ctx, "failed batch: %s", pErr) diff --git a/pkg/internal/client/db_test.go b/pkg/internal/client/db_test.go index 1bbdc6330cfb..5934d0d600d0 100644 --- a/pkg/internal/client/db_test.go +++ b/pkg/internal/client/db_test.go @@ -393,7 +393,6 @@ func TestCommonMethods(t *testing.T) { {dbType, "GetSender"}: {}, {dbType, "PutInline"}: {}, {dbType, "WriteBatch"}: {}, - {txnType, "AcceptUnhandledRetryableErrors"}: {}, {txnType, "Commit"}: {}, {txnType, "CommitInBatch"}: {}, {txnType, "CommitOrCleanup"}: {}, @@ -403,6 +402,10 @@ func TestCommonMethods(t *testing.T) { {txnType, "GenerateForcedRetryableError"}: {}, {txnType, "InternalSetPriority"}: {}, {txnType, "IsFinalized"}: {}, + {txnType, "GetTxnCoordMeta"}: {}, + {txnType, "AugmentTxnCoordMeta"}: {}, + {txnType, "OnFinish"}: {}, + {txnType, "Sender"}: {}, {txnType, "IsSerializableRestart"}: {}, {txnType, "NewBatch"}: {}, {txnType, "Exec"}: {}, @@ -419,13 +422,11 @@ func TestCommonMethods(t *testing.T) { {txnType, "UpdateDeadlineMaybe"}: {}, {txnType, "UpdateStateOnRemoteRetryableErr"}: {}, {txnType, "AddCommitTrigger"}: {}, - {txnType, "CommandCount"}: {}, {txnType, "IsRetryableErrMeantForTxn"}: {}, {txnType, "Isolation"}: {}, {txnType, "OrigTimestamp"}: {}, {txnType, "Proto"}: {}, {txnType, "UserPriority"}: {}, - {txnType, "AnchorKey"}: {}, {txnType, "ID"}: {}, {txnType, "IsAborted"}: {}, {txnType, "IsCommitted"}: {}, diff --git a/pkg/internal/client/sender.go b/pkg/internal/client/sender.go index 16156f2b1032..db1ca5ae4736 100644 --- a/pkg/internal/client/sender.go +++ b/pkg/internal/client/sender.go @@ -18,23 +18,58 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/util/uuid" ) -// Sender is the interface used to call into a Cockroach instance. -// If the returned *roachpb.Error is not nil, no response should be returned. +// TxnType specifies whether a transaction is the root (parent) +// transaction, or a leaf (child) in a tree of client.Txns, as +// is used in a DistSQL flow. 
+type TxnType int
+
+const (
+	_ TxnType = iota
+	// RootTxn specifies this sender is the root transaction, and is
+	// responsible for aggregating all transactional state and
+	// finalizing the transaction.
+	RootTxn
+	// LeafTxn specifies this sender is for one of potentially many
+	// distributed transactional senders. The state from this sender
+	// must be propagated back to the root sender and used to augment
+	// its state before the transaction can be finalized.
+	LeafTxn
+)
+
+// Sender is the interface used to call into a CockroachDB instance.
+// If the returned *roachpb.Error is not nil, no response should be
+// returned.
 type Sender interface {
 	Send(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)
 }
 
-// SenderWithDistSQLBackdoor is implemented by the TxnCoordSender to give
-// DistSQL some hacky powers when handling errors that happened on remote nodes.
-type SenderWithDistSQLBackdoor interface {
+// TxnSender is the interface used to call into a CockroachDB
+// instance when a transaction is active.
+type TxnSender interface {
 	Sender
 
-	// GetTxnState returns the state that the TxnCoordSender has for a
-	// transaction. The bool is false is no state is found.
-	GetTxnState(txnID uuid.UUID) (roachpb.Transaction, bool)
+	// GetMeta retrieves a copy of the TxnCoordMeta, which can be sent
+	// upstream in situations where there are multiple, distributed
+	// TxnSenders, to be combined via AugmentMeta().
+	GetMeta() roachpb.TxnCoordMeta
+	// AugmentMeta combines the TxnCoordMeta from another distributed
+	// TxnSender which is part of the same transaction.
+	AugmentMeta(meta roachpb.TxnCoordMeta)
+	// OnFinish invokes the supplied closure when the sender has finished
+	// with the txn (i.e. it's been abandoned, aborted, or committed).
+	OnFinish(func(error))
+}
+
+// TxnSenderFactory is the interface used to create new instances
+// of TxnSender.
+type TxnSenderFactory interface {
+	// New returns a new instance of TxnSender. The typ parameter
+	// specifies whether the sender is the root or one of potentially
+	// many child "leaf" nodes in a tree of transaction objects, as is
+	// created during a DistSQL flow.
+	New(typ TxnType) TxnSender
 }
 
 // SenderFunc is an adapter to allow the use of ordinary functions
@@ -48,6 +83,35 @@ func (f SenderFunc) Send(
 	return f(ctx, ba)
 }
 
+// TxnSenderFunc is an adapter to allow the use of ordinary functions
+// as TxnSenders; its GetMeta, AugmentMeta, and OnFinish methods panic
+// as unimplemented.
+type TxnSenderFunc func(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)
+
+// Send calls f(ctx, ba).
+func (f TxnSenderFunc) Send(
+	ctx context.Context, ba roachpb.BatchRequest,
+) (*roachpb.BatchResponse, *roachpb.Error) {
+	return f(ctx, ba)
+}
+
+// GetMeta is part of the TxnSender interface.
+func (f TxnSenderFunc) GetMeta() roachpb.TxnCoordMeta { panic("unimplemented") }
+
+// AugmentMeta is part of the TxnSender interface.
+func (f TxnSenderFunc) AugmentMeta(_ roachpb.TxnCoordMeta) { panic("unimplemented") }
+
+// OnFinish is part of the TxnSender interface.
+func (f TxnSenderFunc) OnFinish(_ func(error)) { panic("unimplemented") }
+
+// TxnSenderFactoryFunc is an adapter to allow the use of ordinary functions
+// as TxnSenderFactories.
+type TxnSenderFactoryFunc func(TxnType) TxnSender
+
+// New calls f(typ).
+func (f TxnSenderFactoryFunc) New(typ TxnType) TxnSender { + return f(typ) +} + // SendWrappedWith is a convenience function which wraps the request in a batch // and sends it via the provided Sender and headers. It returns the unwrapped // response or an error. It's valid to pass a `nil` context; an empty one is diff --git a/pkg/internal/client/txn.go b/pkg/internal/client/txn.go index 2e08099eb0f8..1e0a7d817a3e 100644 --- a/pkg/internal/client/txn.go +++ b/pkg/internal/client/txn.go @@ -35,6 +35,10 @@ import ( // concurrent use by multiple goroutines. type Txn struct { db *DB + + // typ indicates the type of transaction. + typ TxnType + // gatewayNodeID, if != 0, is the ID of the node on whose behalf this // transaction is running. Normally this is the current node, but in the case // of Txns created on remote nodes by DistSQL this will be the gateway. @@ -89,19 +93,15 @@ type Txn struct { // If Txn becomes single-threaded, then the point is moot and this can again // go away. previousIDs map[uuid.UUID]struct{} - // commandCount indicates how many requests have been sent through - // this transaction. Reset on retryable txn errors. - // TODO(andrei): This is broken for DistSQL, which doesn't account for the - // requests it uses the transaction for. - commandCount int + // sender is a stateful sender for use with transactions. A new sender is + // created on transaction restarts (not retries). + sender TxnSender } - - // Set for DistSQL transactions that get errors that would otherwise be - // handled by the TxnCoordSender. - acceptUnhandledRetryableErrors bool } -// NewTxn returns a new txn. +// NewTxn returns a new txn. The typ parameter specifies whether this +// transaction is the top level (root), or one of potentially many +// distributed transactions (leaf). // // gatewayNodeID: If != 0, this is the ID of the node on whose behalf this // transaction is running. Normally this is the current node, but in the case @@ -109,8 +109,8 @@ type Txn struct { // If 0 is passed, then no value is going to be filled in the batches sent // through this txn. This will have the effect that the DistSender will fill // in the batch with the current node's ID. -func NewTxn(db *DB, gatewayNodeID roachpb.NodeID) *Txn { - return NewTxnWithProto(db, gatewayNodeID, roachpb.MakeTransaction( +func NewTxn(db *DB, gatewayNodeID roachpb.NodeID, typ TxnType) *Txn { + return NewTxnWithProto(db, gatewayNodeID, typ, roachpb.MakeTransaction( "unnamed", nil, // baseKey roachpb.NormalUserPriority, @@ -123,13 +123,16 @@ func NewTxn(db *DB, gatewayNodeID roachpb.NodeID) *Txn { // NewTxnWithProto is like NewTxn, except it returns a new txn with the provided // Transaction proto. This allows a client.Txn to be created with an already // initialized proto. -func NewTxnWithProto(db *DB, gatewayNodeID roachpb.NodeID, proto roachpb.Transaction) *Txn { +func NewTxnWithProto( + db *DB, gatewayNodeID roachpb.NodeID, typ TxnType, proto roachpb.Transaction, +) *Txn { if db == nil { log.Fatalf(context.TODO(), "attempting to create txn with nil db for Transaction: %s", proto) } proto.AssertInitialized(context.TODO()) - txn := &Txn{db: db, gatewayNodeID: gatewayNodeID} + txn := &Txn{db: db, typ: typ, gatewayNodeID: gatewayNodeID} txn.mu.Proto = proto + txn.mu.sender = db.factory.New(typ) return txn } @@ -138,25 +141,18 @@ func (txn *Txn) DB() *DB { return txn.db } -// ID returns the current ID of the transaction. -func (txn *Txn) ID() uuid.UUID { +// Sender returns a transaction's TxnSender. 
+func (txn *Txn) Sender() TxnSender { txn.mu.Lock() defer txn.mu.Unlock() - return txn.mu.Proto.ID + return txn.mu.sender } -// AcceptUnhandledRetryableErrors is used by DistSQL to make the client.Txn not -// freak out on errors that should be handled by the TxnCoordSender. -func (txn *Txn) AcceptUnhandledRetryableErrors() { - txn.acceptUnhandledRetryableErrors = true -} - -// CommandCount returns the count of commands executed through this txn. -// Retryable errors on the transaction will reset the count to 0. -func (txn *Txn) CommandCount() int { +// ID returns the current ID of the transaction. +func (txn *Txn) ID() uuid.UUID { txn.mu.Lock() defer txn.mu.Unlock() - return txn.mu.commandCount + return txn.mu.Proto.ID } // IsFinalized returns true if this Txn has been finalized and should therefore @@ -278,14 +274,6 @@ func (txn *Txn) OrigTimestamp() hlc.Timestamp { return txn.mu.Proto.OrigTimestamp } -// AnchorKey returns the transaction's anchor key. The caller should treat the -// returned byte slice as immutable. -func (txn *Txn) AnchorKey() []byte { - txn.mu.Lock() - defer txn.mu.Unlock() - return txn.mu.Proto.Key -} - // SetTxnAnchorKey sets the key at which to anchor the transaction record. The // transaction anchor key defaults to the first key written in a transaction. func (txn *Txn) SetTxnAnchorKey(key roachpb.Key) error { @@ -507,7 +495,6 @@ func (txn *Txn) DelRange(ctx context.Context, begin, end interface{}) error { func (txn *Txn) Run(ctx context.Context, b *Batch) error { tracing.AnnotateTrace() defer tracing.AnnotateTrace() - if err := b.prepare(); err != nil { return err } @@ -620,6 +607,14 @@ func (txn *Txn) AddCommitTrigger(trigger func()) { txn.commitTriggers = append(txn.commitTriggers, trigger) } +// OnFinish adds a closure to be executed when the transaction sender +// moves from state "ready" to "done" or "aborted". +func (txn *Txn) OnFinish(onFinishFn func(error)) { + txn.mu.Lock() + defer txn.mu.Unlock() + txn.mu.sender.OnFinish(onFinishFn) +} + // maybeFinishReadonly provides a fast-path for finishing a read-only // transaction without going through the overhead of creating an // EndTransactionRequest only to not send it. @@ -871,10 +866,12 @@ func (txn *Txn) Send( endTxnRequest, haveEndTxn := ba.Requests[lastIndex].GetInner().(*roachpb.EndTransactionRequest) var needBeginTxn, elideEndTxn bool + var sender TxnSender lockedPrelude := func() *roachpb.Error { txn.mu.Lock() defer txn.mu.Unlock() + sender = txn.mu.sender if txn.mu.Proto.Status != roachpb.PENDING || txn.mu.finalized { return roachpb.NewErrorf( "attempting to use transaction with wrong status or finalized: %s %v", @@ -935,9 +932,6 @@ func (txn *Txn) Send( ba.Requests = ba.Requests[:lastIndex] } - // Increment the statement count sent through this transaction. - txn.mu.commandCount += len(ba.Requests) - // Clone the Txn's Proto so that future modifications can be made without // worrying about synchronization. newTxn := txn.mu.Proto.Clone() @@ -950,7 +944,7 @@ func (txn *Txn) Send( // Send call through the DB. requestTxnID := ba.Txn.ID - br, pErr := txn.db.send(ctx, ba) + br, pErr := txn.db.sendUsingSender(ctx, ba, sender) // Lock for the entire response postlude. 
txn.mu.Lock() @@ -991,10 +985,20 @@ func (txn *Txn) Send( log.Infof(ctx, "failed batch: %s", pErr) } if retryErr, ok := pErr.GetDetail().(*roachpb.HandledRetryableTxnError); ok { - txn.updateStateOnRetryableErrLocked(ctx, *retryErr, requestTxnID) + if requestTxnID != retryErr.TxnID { + // KV should not return errors for transactions other than the one that sent + // the request. + log.Fatalf(ctx, "retryable error for the wrong txn. "+ + "requestTxnID: %s, retryErr.TxnID: %s. retryErr: %s", + requestTxnID, retryErr.TxnID, retryErr) + } else if requestTxnID == txn.mu.Proto.ID { + // Our requestTxnID still matches the proto, so update the state. + // If it doesn't match here, it means a concurrent request through + // this Txn object has already aborted and restarted the txn. + txn.updateStateOnRetryableErrLocked(ctx, retryErr) + } } - if pErr.TransactionRestart != roachpb.TransactionRestart_NONE && - !txn.acceptUnhandledRetryableErrors { + if pErr.TransactionRestart != roachpb.TransactionRestart_NONE { log.Fatalf(ctx, "unexpected retryable error at the client.Txn level: (%T) %s", pErr.GetDetail(), pErr) @@ -1004,7 +1008,7 @@ func (txn *Txn) Send( if br != nil { if br.Error != nil { - panic(roachpb.ErrorUnexpectedlySet(txn.db.sender, br)) + panic(roachpb.ErrorUnexpectedlySet(txn.mu.sender, br)) } if br.Txn != nil && br.Txn.ID != txn.mu.Proto.ID { return nil, roachpb.NewError(&roachpb.TxnPrevAttemptError{}) @@ -1060,80 +1064,54 @@ func firstWriteIndex(ba roachpb.BatchRequest) (int, *roachpb.Error) { return -1, nil } -// UpdateStateOnRemoteRetryableErr updates the Txn, and the Transaction proto -// inside it, in response to an error encountered when running a request through -// the txn. If the error is not a RetryableTxnError, then this is a no-op. For a -// retryable error, the Transaction proto is either initialized with the updated -// proto from the error, or a new Transaction proto is initialized. -func (txn *Txn) UpdateStateOnRemoteRetryableErr(ctx context.Context, pErr roachpb.Error) { +// GetTxnCoordMeta returns the TxnCoordMeta information for this +// transaction for use with AugmentTxnCoordMeta(), when combining the +// impact of multiple distributed transaction coordinators that are +// all operating on the same transaction. +func (txn *Txn) GetTxnCoordMeta() roachpb.TxnCoordMeta { txn.mu.Lock() defer txn.mu.Unlock() + return txn.mu.sender.GetMeta() +} - if pErr.TransactionRestart == roachpb.TransactionRestart_NONE { - log.Fatalf(ctx, "unexpected non-retryable error: %s", pErr) - } - if pErr.GetTxn() == nil { - // DistSQL requests (like all SQL requests) are always supposed to be done - // in a transaction. - log.Fatalf(ctx, "unexpected retryable error with no txn ran through DistSQL: %s", pErr) - } - - // Assert that the TxnCoordSender doesn't have any state for this transaction - // (and it shouldn't, since DistSQL isn't supposed to do any works in - // transaction that had performed writes and hence started being tracked). If - // the TxnCoordSender were to have state, it'd be a bad thing that we're not - // updating it. - txnID := pErr.GetTxn().ID - if _, ok := txn.db.GetSender().(SenderWithDistSQLBackdoor).GetTxnState(txnID); ok { - log.Fatalf(ctx, "unexpected state in TxnCoordSender for transaction in error: %s", pErr) - } +// AugmentTxnCoordMeta augments this transaction's TxnCoordMeta +// information with the supplied meta. For use with GetTxnCoordMeta(). 
+func (txn *Txn) AugmentTxnCoordMeta(meta roachpb.TxnCoordMeta) {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+	txn.mu.sender.AugmentMeta(meta)
+}
 
-	// Emulate the processing that the TxnCoordSender would have done on this
-	// error.
-	newTxn := roachpb.PrepareTransactionForRetry(ctx, &pErr, txn.mu.UserPriority, txn.db.clock)
-	newErr := roachpb.NewHandledRetryableTxnError(pErr.Message, pErr.GetTxn().ID, newTxn)
+// UpdateStateOnRemoteRetryableErr updates the Txn, and the
+// Transaction proto inside it, in response to an error encountered
+// when running a request through the txn.
+func (txn *Txn) UpdateStateOnRemoteRetryableErr(
+	ctx context.Context, retryErr *roachpb.HandledRetryableTxnError,
+) {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
 
-	txn.updateStateOnRetryableErrLocked(
-		ctx, *newErr,
-		// We're passing the current ID as the "request"'s. In doing so, we're
-		// assuming that the Txn hasn't changed asynchronously since we started
-		// executing the query; we're relying on DistSQL queries not being
-		// executed concurrently with anything else using this txn.
-		txn.mu.Proto.ID)
+	// If the transaction has been reset since this request was sent,
+	// ignore the error.
+	if retryErr.TxnID != txn.mu.Proto.ID {
+		log.Eventf(ctx, "retryable error for an older version of txn: current txn: %s, retryErr: %s",
+			txn.mu.Proto, retryErr)
+		return
+	}
+	txn.updateStateOnRetryableErrLocked(ctx, retryErr)
 }
 
 // updateStateOnRetryableErrLocked updates the Transaction proto inside txn.
 //
-// requestTxnID identifies the state of the transaction at the time when the
-// request that generated retryErr was sent. It is used to see if the information
-// in the error is obsolete by now.
-//
 // This method is safe to call repeatedly for requests from the same txn epoch.
 // The first such update will move the Transaction forward (either create a new
-// one or increment the epoch), and next calls will be no-ops.
+// one or increment the epoch), and subsequent calls will be no-ops.
 func (txn *Txn) updateStateOnRetryableErrLocked(
-	ctx context.Context, retryErr roachpb.HandledRetryableTxnError, requestTxnID uuid.UUID,
+	ctx context.Context, retryErr *roachpb.HandledRetryableTxnError,
 ) {
-	if requestTxnID != retryErr.TxnID {
-		// KV should not return errors for transactions other than the one that sent
-		// the request.
-		log.Fatalf(ctx, "retryable error for the wrong txn. "+
-			"requestTxnID: %s, retryErr.TxnID: %s. retryErr: %s",
-			requestTxnID, retryErr.TxnID, retryErr)
-	}
 	newTxn := &retryErr.Transaction
-	if requestTxnID != txn.mu.Proto.ID {
-		// We were already aborted, so ignore the retry error for the previous
-		// Txn incarnation.
-		return
-	}
-
-	// Reset the statement count as this is a retryable txn error.
-	txn.mu.commandCount = 0
-
-	abortErr := requestTxnID != newTxn.ID
+	abortErr := txn.mu.Proto.ID != newTxn.ID
 	if abortErr {
 		// This means that the cause was a TransactionAbortedError;
 		// we've created a new Transaction that we're about to start using, so we
@@ -1146,6 +1124,9 @@ func (txn *Txn) updateStateOnRetryableErrLocked(
 		// attempt. The txn inside pErr was correctly prepared for this by
 		// TxnCoordSender.
 		txn.mu.Proto = *newTxn
+
+		// Create a new txn sender.
+		txn.mu.sender = txn.db.factory.New(txn.typ)
 	} else {
 		// Update the transaction proto with the one to be used for the next
 		// attempt.
The txn inside pErr was correctly prepared for this by diff --git a/pkg/internal/client/txn_test.go b/pkg/internal/client/txn_test.go index b995734929bb..76dd1fbf88e5 100644 --- a/pkg/internal/client/txn_test.go +++ b/pkg/internal/client/txn_test.go @@ -48,7 +48,7 @@ func TestTxnSnowballTrace(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - db := NewDB(newTestSender(nil), clock) + db := NewDB(newTestTxnFactory(nil), clock) tracer := tracing.NewTracer() ctx, sp, err := tracing.StartSnowballTrace(context.Background(), tracer, "test-txn") if err != nil { @@ -79,58 +79,59 @@ func TestTxnSnowballTrace(t *testing.T) { } } -// TestSender mocks out some of the txn coordinator sender's -// functionality. It responds to PutRequests using testPutResp. -func newTestSender( +func newTestTxnFactory( createReply func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error), -) SenderFunc { - return func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { - if ba.UserPriority == 0 { - ba.UserPriority = 1 - } +) TxnSenderFactoryFunc { + return TxnSenderFactoryFunc(func(_ TxnType) TxnSender { + return TxnSenderFunc(func(_ context.Context, ba roachpb.BatchRequest, + ) (*roachpb.BatchResponse, *roachpb.Error) { + if ba.UserPriority == 0 { + ba.UserPriority = 1 + } - var br *roachpb.BatchResponse - var pErr *roachpb.Error - if createReply != nil { - br, pErr = createReply(ba) - } else { - br = ba.CreateReply() - } - if pErr != nil { - return nil, pErr - } - var writing bool - status := roachpb.PENDING - for i, req := range ba.Requests { - args := req.GetInner() - if _, ok := args.(*roachpb.PutRequest); ok { - testPutRespCopy := testPutResp - union := &br.Responses[i] // avoid operating on copy - union.MustSetInner(&testPutRespCopy) + var br *roachpb.BatchResponse + var pErr *roachpb.Error + if createReply != nil { + br, pErr = createReply(ba) + } else { + br = ba.CreateReply() } - if roachpb.IsTransactionWrite(args) { - writing = true + if pErr != nil { + return nil, pErr } - } - if args, ok := ba.GetArg(roachpb.EndTransaction); ok { - et := args.(*roachpb.EndTransactionRequest) - writing = true - if et.Commit { - status = roachpb.COMMITTED - } else { - status = roachpb.ABORTED + var writing bool + status := roachpb.PENDING + for i, req := range ba.Requests { + args := req.GetInner() + if _, ok := args.(*roachpb.PutRequest); ok { + testPutRespCopy := testPutResp + union := &br.Responses[i] // avoid operating on copy + union.MustSetInner(&testPutRespCopy) + } + if roachpb.IsTransactionWrite(args) { + writing = true + } } - } - if ba.Txn != nil && br.Txn == nil { - txnClone := ba.Txn.Clone() - br.Txn = &txnClone - if pErr == nil { - br.Txn.Writing = writing - br.Txn.Status = status + if args, ok := ba.GetArg(roachpb.EndTransaction); ok { + et := args.(*roachpb.EndTransactionRequest) + writing = true + if et.Commit { + status = roachpb.COMMITTED + } else { + status = roachpb.ABORTED + } } - } - return br, pErr - } + if ba.Txn != nil && br.Txn == nil { + txnClone := ba.Txn.Clone() + br.Txn = &txnClone + if pErr == nil { + br.Txn.Writing = writing + br.Txn.Status = status + } + } + return br, pErr + }) + }) } func testPut() roachpb.BatchRequest { @@ -148,12 +149,12 @@ func TestInitPut(t *testing.T) { // TODO(vivekmenezes): update test or remove when InitPut is being // considered sufficiently tested and this path exercised. 
clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() return br, nil }), clock) - txn := NewTxn(db, 0 /* gatewayNodeID */) + txn := NewTxn(db, 0 /* gatewayNodeID */, RootTxn) if pErr := txn.InitPut(context.Background(), "a", "b", false); pErr != nil { t.Fatal(pErr) } @@ -180,7 +181,7 @@ func TestTxnRequestTxnTimestamp(t *testing.T) { manual := hlc.NewManualClock(testCases[0].expRequestTS.WallTime) clock := hlc.NewClock(manual.UnixNano, time.Nanosecond) var testIdx int - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { test := testCases[testIdx] if test.expRequestTS != ba.Txn.Timestamp { return nil, roachpb.NewErrorf("%d: expected ts %s got %s", testIdx, test.expRequestTS, ba.Txn.Timestamp) @@ -192,7 +193,7 @@ func TestTxnRequestTxnTimestamp(t *testing.T) { return br, nil }), clock) - txn := NewTxn(db, 0 /* gatewayNodeID */) + txn := NewTxn(db, 0 /* gatewayNodeID */, RootTxn) for testIdx = range testCases { if _, pErr := txn.Send(context.Background(), ba); pErr != nil { @@ -210,7 +211,7 @@ func TestTransactionConfig(t *testing.T) { clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) dbCtx := DefaultDBContext() dbCtx.UserPriority = 101 - db := NewDBWithContext(newTestSender(nil), clock, dbCtx) + db := NewDBWithContext(newTestTxnFactory(nil), clock, dbCtx) if err := db.Txn(context.TODO(), func(ctx context.Context, txn *Txn) error { if txn.db.ctx.UserPriority != db.ctx.UserPriority { t.Errorf("expected txn user priority %f; got %f", @@ -229,7 +230,7 @@ func TestCommitReadOnlyTransaction(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) var calls []roachpb.Method - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) return ba.CreateReply(), nil }), clock) @@ -253,7 +254,7 @@ func TestCommitReadOnlyTransactionExplicit(t *testing.T) { clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) for _, withGet := range []bool{true, false} { var calls []roachpb.Method - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) return ba.CreateReply(), nil }), clock) @@ -282,7 +283,7 @@ func TestCommitMutatingTransaction(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) var calls []roachpb.Method - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) 
if bt, ok := ba.GetArg(roachpb.BeginTransaction); ok && !bt.Header().Key.Equal(roachpb.Key("a")) { t.Errorf("expected begin transaction key to be \"a\"; got %s", bt.Header().Key) @@ -327,7 +328,7 @@ func TestTxnInsertBeginTransaction(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) var calls []roachpb.Method - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) return ba.CreateReply(), nil }), clock) @@ -350,7 +351,7 @@ func TestTxnInsertBeginTransaction(t *testing.T) { func TestBeginTransactionErrorIndex(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { pErr := roachpb.NewError(&roachpb.WriteIntentError{}) pErr.SetErrorIndex(0) return nil, pErr @@ -378,7 +379,7 @@ func TestCommitTransactionOnce(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) count := 0 - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { count++ return ba.CreateReply(), nil }), clock) @@ -399,7 +400,7 @@ func TestCommitTransactionOnce(t *testing.T) { func TestAbortReadOnlyTransaction(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.EndTransaction); ok { t.Errorf("did not expect EndTransaction") } @@ -423,7 +424,7 @@ func TestEndWriteRestartReadOnlyTransaction(t *testing.T) { for _, success := range []bool{true, false} { expCalls := []roachpb.Method{roachpb.BeginTransaction, roachpb.Put, roachpb.EndTransaction} var calls []roachpb.Method - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) return ba.CreateReply(), nil }), clock) @@ -462,7 +463,7 @@ func TestTransactionKeyNotChangedInRestart(t *testing.T) { clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) attempt := 0 keys := []string{"first", "second"} - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { // Ignore the final EndTxnRequest. if _, ok := ba.GetArg(roachpb.EndTransaction); ok { return ba.CreateReply(), nil @@ -518,7 +519,7 @@ func TestAbortMutatingTransaction(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) var calls []roachpb.Method - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) 
if et, ok := ba.GetArg(roachpb.EndTransaction); ok && et.(*roachpb.EndTransactionRequest).Commit { t.Errorf("expected commit to be false") @@ -566,7 +567,7 @@ func TestRunTransactionRetryOnErrors(t *testing.T) { for _, test := range testCases { t.Run(fmt.Sprintf("%T", test.err), func(t *testing.T) { count := 0 - db := NewDB(newTestSender( + db := NewDB(newTestTxnFactory( func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.Put); ok { @@ -622,10 +623,10 @@ func TestTransactionStatus(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - db := NewDB(newTestSender(nil), clock) + db := NewDB(newTestTxnFactory(nil), clock) for _, write := range []bool{true, false} { for _, commit := range []bool{true, false} { - txn := NewTxn(db, 0 /* gatewayNodeID */) + txn := NewTxn(db, 0 /* gatewayNodeID */, RootTxn) if _, pErr := txn.Get(context.Background(), "a"); pErr != nil { t.Fatal(pErr) @@ -657,11 +658,11 @@ func TestTransactionStatus(t *testing.T) { func TestCommitInBatchWrongTxn(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - db := NewDB(newTestSender(nil), clock) - txn := NewTxn(db, 0 /* gatewayNodeID */) + db := NewDB(newTestTxnFactory(nil), clock) + txn := NewTxn(db, 0 /* gatewayNodeID */, RootTxn) b1 := &Batch{} - txn2 := NewTxn(db, 0 /* gatewayNodeID */) + txn2 := NewTxn(db, 0 /* gatewayNodeID */, RootTxn) b2 := txn2.NewBatch() for _, b := range []*Batch{b1, b2} { @@ -677,8 +678,8 @@ func TestTimestampSelectionInOptions(t *testing.T) { defer leaktest.AfterTest(t)() mc := hlc.NewManualClock(100) clock := hlc.NewClock(mc.UnixNano, time.Nanosecond) - db := NewDB(newTestSender(nil), clock) - txn := NewTxn(db, 0 /* gatewayNodeID */) + db := NewDB(newTestTxnFactory(nil), clock) + txn := NewTxn(db, 0 /* gatewayNodeID */, RootTxn) refTimestamp := clock.Now() @@ -704,7 +705,7 @@ func TestSetPriority(t *testing.T) { clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) var expected roachpb.UserPriority - db := NewDB(newTestSender( + db := NewDB(newTestTxnFactory( func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if ba.UserPriority != expected { pErr := roachpb.NewErrorf("Priority not set correctly in the batch! "+ @@ -720,7 +721,7 @@ func TestSetPriority(t *testing.T) { // Verify the normal priority setting path. expected = roachpb.NormalUserPriority - txn := NewTxn(db, 0 /* gatewayNodeID */) + txn := NewTxn(db, 0 /* gatewayNodeID */, RootTxn) if err := txn.SetUserPriority(expected); err != nil { t.Fatal(err) } @@ -730,7 +731,7 @@ func TestSetPriority(t *testing.T) { // Verify the internal (fixed value) priority setting path. 
expected = roachpb.UserPriority(-13) - txn = NewTxn(db, 0 /* gatewayNodeID */) + txn = NewTxn(db, 0 /* gatewayNodeID */, RootTxn) txn.InternalSetPriority(13) if _, pErr := txn.Send(context.Background(), roachpb.BatchRequest{}); pErr != nil { t.Fatal(pErr) @@ -742,7 +743,7 @@ func TestSetPriority(t *testing.T) { func TestWrongTxnRetry(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - db := NewDB(newTestSender(nil), clock) + db := NewDB(newTestTxnFactory(nil), clock) var retries int txnClosure := func(ctx context.Context, outerTxn *Txn) error { @@ -765,7 +766,7 @@ func TestWrongTxnRetry(t *testing.T) { return roachpb.NewHandledRetryableTxnError( "test error", innerTxn.Proto().ID, *innerTxn.Proto()) } - innerTxn := NewTxn(db, 0 /* gatewayNodeID */) + innerTxn := NewTxn(db, 0 /* gatewayNodeID */, RootTxn) err := innerTxn.Exec(ctx, execOpt, innerClosure) if !testutils.IsError(err, "test error") { t.Fatalf("unexpected inner failure: %v", err) @@ -784,7 +785,7 @@ func TestWrongTxnRetry(t *testing.T) { func TestBatchMixRawRequest(t *testing.T) { defer leaktest.AfterTest(t)() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - db := NewDB(newTestSender(nil), clock) + db := NewDB(newTestTxnFactory(nil), clock) b := &Batch{} b.AddRawRequest(&roachpb.EndTransactionRequest{}) @@ -798,8 +799,8 @@ func TestUpdateDeadlineMaybe(t *testing.T) { defer leaktest.AfterTest(t)() mc := hlc.NewManualClock(1) clock := hlc.NewClock(mc.UnixNano, time.Nanosecond) - db := NewDB(nil /* sender */, clock) - txn := NewTxn(db, 0 /* gatewayNodeID */) + db := NewDB(TxnSenderFactoryFunc(func(_ TxnType) TxnSender { return nil }), clock) + txn := NewTxn(db, 0 /* gatewayNodeID */, RootTxn) if txn.deadline != nil { t.Errorf("unexpected initial deadline: %s", txn.deadline) @@ -839,7 +840,7 @@ func TestConcurrentTxnRequests(t *testing.T) { clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) var callCountsMu syncutil.Mutex callCounts := make(map[roachpb.Method]int) - db := NewDB(newTestSender(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { callCountsMu.Lock() for _, m := range ba.Methods() { callCounts[m]++ diff --git a/pkg/kv/dist_sender_server_test.go b/pkg/kv/dist_sender_server_test.go index 39e3ea19ec49..87416c36638d 100644 --- a/pkg/kv/dist_sender_server_test.go +++ b/pkg/kv/dist_sender_server_test.go @@ -30,6 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/storage/engine" "github.com/cockroachdb/cockroach/pkg/storage/engine/enginepb" @@ -39,6 +40,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/metric" + "github.com/cockroachdb/cockroach/pkg/util/tracing" ) // NOTE: these tests are in package kv_test to avoid a circular @@ -90,7 +93,17 @@ func TestRangeLookupWithOpenTransaction(t *testing.T) { }, s.(*server.TestServer).Gossip(), ) - db := client.NewDB(ds, s.Clock()) + ambient := log.AmbientContext{Tracer: tracing.NewTracer()} + tsf := kv.NewTxnCoordSenderFactory( + ambient, + cluster.MakeTestingClusterSettings(), + ds, + s.Clock(), + false, /* 
linearizable */ + s.Stopper(), + kv.MakeTxnMetrics(metric.TestSampleInterval), + ) + db := client.NewDB(tsf, s.Clock()) // Now, with an intent pending, attempt (asynchronously) to read // from an arbitrary key. This will cause the distributed sender to @@ -360,7 +373,7 @@ func TestMultiRangeBoundedBatchScan(t *testing.T) { defer s.Stopper().Stop(context.TODO()) ctx := context.TODO() - db := s.KVClient().(*client.DB) + db := s.DB() splits := []string{"a", "b", "c", "d", "e", "f"} if err := setupMultipleRanges(ctx, db, splits...); err != nil { t.Fatal(err) @@ -551,7 +564,7 @@ func TestMultiRangeBoundedBatchScanUnsortedOrder(t *testing.T) { ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "a", "b", "c", "d", "e", "f"); err != nil { t.Fatal(err) } @@ -590,7 +603,7 @@ func TestMultiRangeBoundedBatchScanSortedOverlapping(t *testing.T) { ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "a", "b", "c", "d", "e", "f"); err != nil { t.Fatal(err) } @@ -659,7 +672,7 @@ func TestMultiRangeBoundedBatchDelRange(t *testing.T) { ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "a", "b", "c", "d", "e", "f", "g", "h"); err != nil { t.Fatal(err) } @@ -727,7 +740,7 @@ func TestMultiRangeBoundedBatchDelRangeBoundary(t *testing.T) { ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "a", "b"); err != nil { t.Fatal(err) } @@ -773,7 +786,7 @@ func TestMultiRangeBoundedBatchDelRangeOverlappingKeys(t *testing.T) { ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "a", "b", "c", "d", "e", "f"); err != nil { t.Fatal(err) } @@ -838,7 +851,7 @@ func TestMultiRangeEmptyAfterTruncate(t *testing.T) { s, _ := startNoSplitServer(t) ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "c", "d"); err != nil { t.Fatal(err) } @@ -861,7 +874,7 @@ func TestMultiRequestBatchWithFwdAndReverseRequests(t *testing.T) { s, _ := startNoSplitServer(t) ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "a", "b"); err != nil { t.Fatal(err) } @@ -883,7 +896,7 @@ func TestMultiRangeScanReverseScanDeleteResolve(t *testing.T) { s, _ := startNoSplitServer(t) ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "b"); err != nil { t.Fatal(err) } @@ -942,7 +955,7 @@ func TestMultiRangeScanReverseScanInconsistent(t *testing.T) { s, _ := startNoSplitServer(t) ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "b"); err != nil { t.Fatal(err) } @@ -1065,7 +1078,7 @@ func TestParallelSender(t *testing.T) { } func initReverseScanTestEnv(s serverutils.TestServerInterface, t *testing.T) *client.DB { - db := s.KVClient().(*client.DB) + db := s.DB() // Set up multiple ranges: // ["", "b"),["b", "e") ,["e", "g") and ["g", "\xff\xff"). 
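Reviewer note: the NewTxnCoordSenderFactory construction in TestRangeLookupWithOpenTransaction above is split across the surrounding hunk, so here is the same wiring assembled in one piece. This is a sketch for orientation only, not part of the patch; ds (the DistSender under test) and s (the test server) are assumptions taken from that test's context, and the argument order follows the factory signature introduced in pkg/kv/txn_coord_sender.go below.

	// Build a TxnCoordSenderFactory around the DistSender, then hand the
	// factory (not a raw Sender) to client.NewDB.
	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
	tsf := kv.NewTxnCoordSenderFactory(
		ambient,
		cluster.MakeTestingClusterSettings(),
		ds, // the wrapped client.Sender
		s.Clock(),
		false, /* linearizable */
		s.Stopper(),
		kv.MakeTxnMetrics(metric.TestSampleInterval),
	)
	db := client.NewDB(tsf, s.Clock())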
@@ -1163,7 +1176,7 @@ func TestStopAtRangeBoundary(t *testing.T) { ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "a", "b", "c", "d", "e", "f"); err != nil { t.Fatal(err) } @@ -1621,7 +1634,7 @@ func TestNoSequenceCachePutOnRangeMismatchError(t *testing.T) { s, _ := startNoSplitServer(t) ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "b", "c"); err != nil { t.Fatal(err) } @@ -1682,7 +1695,7 @@ func TestPropagateTxnOnError(t *testing.T) { ctx := context.TODO() defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() if err := setupMultipleRanges(ctx, db, "b"); err != nil { t.Fatal(err) } diff --git a/pkg/kv/local_test_cluster_util.go b/pkg/kv/local_test_cluster_util.go index da699d801b8a..a619aa82218d 100644 --- a/pkg/kv/local_test_cluster_util.go +++ b/pkg/kv/local_test_cluster_util.go @@ -46,9 +46,9 @@ func (l *localTestClusterTransport) SendNext(ctx context.Context, done chan<- Ba l.Transport.SendNext(ctx, done) } -// InitSenderForLocalTestCluster initializes a TxnCoordSender that can be used -// with LocalTestCluster. -func InitSenderForLocalTestCluster( +// InitFactoryForLocalTestCluster initializes a TxnCoordSenderFactory +// that can be used with LocalTestCluster. +func InitFactoryForLocalTestCluster( st *cluster.Settings, nodeDesc *roachpb.NodeDescriptor, tracer opentracing.Tracer, @@ -57,7 +57,7 @@ func InitSenderForLocalTestCluster( stores client.Sender, stopper *stop.Stopper, gossip *gossip.Gossip, -) client.Sender { +) client.TxnSenderFactory { retryOpts := base.DefaultRetryOptions() retryOpts.Closer = stopper.ShouldQuiesce() senderTransportFactory := SenderTransportFactory(tracer, stores) @@ -83,7 +83,7 @@ func InitSenderForLocalTestCluster( }, gossip) ambient := log.AmbientContext{Tracer: tracer} - return NewTxnCoordSender( + return NewTxnCoordSenderFactory( ambient, st, distSender, diff --git a/pkg/kv/split_test.go b/pkg/kv/split_test.go index f3460a3a808f..328beba4a6df 100644 --- a/pkg/kv/split_test.go +++ b/pkg/kv/split_test.go @@ -100,7 +100,7 @@ func startTestWriter( // which are resolved synchronously with EndTransaction and via RPC. func TestRangeSplitMeta(t *testing.T) { defer leaktest.AfterTest(t)() - s, _ := createTestDB(t) + s := createTestDB(t) defer s.Stop() ctx := context.TODO() @@ -131,7 +131,7 @@ func TestRangeSplitMeta(t *testing.T) { // composed of a random mix of puts. func TestRangeSplitsWithConcurrentTxns(t *testing.T) { defer leaktest.AfterTest(t)() - s, _ := createTestDB(t) + s := createTestDB(t) defer s.Stop() // This channel shuts the whole apparatus down. @@ -189,7 +189,7 @@ func TestRangeSplitsWithWritePressure(t *testing.T) { DisableScanner: true, }, } - s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster) + s.Start(t, testutils.NewNodeTestBaseContext(), InitFactoryForLocalTestCluster) // This is purely to silence log spam. config.TestingSetupZoneConfigHook(s.Stopper) @@ -238,7 +238,7 @@ func TestRangeSplitsWithWritePressure(t *testing.T) { // on the same splitKey succeeds. 
func TestRangeSplitsWithSameKeyTwice(t *testing.T) { defer leaktest.AfterTest(t)() - s, _ := createTestDB(t) + s := createTestDB(t) defer s.Stop() ctx := context.TODO() diff --git a/pkg/kv/txn_coord_sender.go b/pkg/kv/txn_coord_sender.go index 1ef2b98bb5d8..e16cd0505b4c 100644 --- a/pkg/kv/txn_coord_sender.go +++ b/pkg/kv/txn_coord_sender.go @@ -16,9 +16,7 @@ package kv import ( "context" - "fmt" "sort" - "sync/atomic" "time" opentracing "github.com/opentracing/opentracing-go" @@ -41,9 +39,8 @@ import ( ) const ( - statusLogInterval = 5 * time.Second - opTxnCoordSender = "txn coordinator" - opHeartbeatLoop = "heartbeat" + opTxnCoordSender = "txn coordinator" + opHeartbeatLoop = "heartbeat" ) // maxTxnIntentsBytes is a threshold in bytes for intent spans stored @@ -59,62 +56,71 @@ var maxTxnIntentsBytes = settings.RegisterIntSetting( 256*1000, ) -// txnMetadata holds information about an ongoing transaction, as -// seen from the perspective of this coordinator. It records all -// keys (and key ranges) mutated as part of the transaction for -// resolution upon transaction commit or abort. -type txnMetadata struct { - // txn is a copy of the transaction record, updated with each request. - txn roachpb.Transaction - - // keys stores key ranges affected by this transaction through this - // coordinator. By keeping this record, the coordinator will be able - // to update the write intent when the transaction is committed. - keys []roachpb.Span - - // keysSize is the total size in bytes of the key spans written - // during this transaction. - keysSize int64 - - // lastUpdateNanos is the latest wall time in nanos the client sent - // transaction operations to this coordinator. Accessed and updated - // atomically. - lastUpdateNanos int64 - - // Analogous to lastUpdateNanos, this is the wall time at which the - // transaction was instantiated. - firstUpdateNanos int64 - - // timeoutDuration is the time after which the transaction should be - // considered abandoned by the client. That is, when - // current_timestamp > lastUpdateTS + timeoutDuration. - timeoutDuration time.Duration - - // txnEnd is closed when the transaction is aborted or committed, - // terminating the associated heartbeat instance. - txnEnd chan struct{} -} +type txnCoordState int -// setLastUpdate updates the wall time (in nanoseconds) since the most -// recent client operation for this transaction through the coordinator. -func (tm *txnMetadata) setLastUpdate(nowNanos int64) { - atomic.StoreInt64(&tm.lastUpdateNanos, nowNanos) -} +const ( + _ txnCoordState = iota + // done indicates the transaction has been completed via end + // transaction and can no longer be used. + done + // aborted indicates the transaction was aborted or abandoned (e.g. + // from timeout, heartbeat failure, context cancelation, txn abort + // or restart, etc.) + aborted +) -// getLastUpdate atomically loads the nanosecond wall time of the most -// recent client operation. -func (tm *txnMetadata) getLastUpdate() int64 { - return atomic.LoadInt64(&tm.lastUpdateNanos) -} +// A TxnCoordSender is an implementation of client.Sender which wraps +// a lower-level Sender (either a storage.Stores or a DistSender) to +// which it sends commands. It acts as a man-in-the-middle, +// coordinating transaction state for clients. Unlike other senders, +// the TxnCoordSender is stateful and holds information about an +// ongoing transaction. Among other things, it records the intent +// spans of keys mutated by the transaction for later +// resolution. 
+
+// After a transaction has begun writing, the TxnCoordSender may start
+// sending periodic heartbeat messages to that transaction's txn
+// record, to keep it live. Note that this capability is optional, and
+// intended for use only from the top level transaction coordinator,
+// in the event that multiple coordinators are active (i.e. in a
+// distributed SQL flow).
+type TxnCoordSender struct {
+	mu struct {
+		syncutil.Mutex
+		// meta contains all coordinator state which may be passed between
+		// distributed TxnCoordSenders via GetMeta() and AugmentMeta().
+		meta roachpb.TxnCoordMeta
+
+		// intentsSize is the size in bytes of the intent spans in the
+		// meta, maintained to efficiently check the threshold.
+		intentsSize int64
+		// lastUpdateNanos is the latest wall time in nanos the client sent
+		// transaction operations to this coordinator. Accessed and updated
+		// atomically.
+		lastUpdateNanos int64
+		// Analogous to lastUpdateNanos, this is the wall time at which the
+		// transaction was instantiated.
+		firstUpdateNanos int64
+		// txnEnd is closed when the transaction is aborted or committed,
+		// terminating the associated heartbeat instance.
+		txnEnd chan struct{}
+		// state indicates the state of the transaction coordinator.
+		state txnCoordState
+		// onFinishFn is a closure invoked when state changes to done or aborted.
+		onFinishFn func(error)
+	}

-// hasClientAbandonedCoord returns true if the transaction has not
-// been updated by the client adding a request within the allowed
-// timeout.
-func (tm *txnMetadata) hasClientAbandonedCoord(nowNanos int64) bool {
-	timeout := nowNanos - tm.timeoutDuration.Nanoseconds()
-	return tm.getLastUpdate() < timeout
+	// A pointer member to the creating factory provides access to
+	// immutable factory settings.
+	*TxnCoordSenderFactory
+
+	// typ specifies whether this transaction is the top level,
+	// or one of potentially many distributed transactions.
+	typ client.TxnType
 }

+var _ client.TxnSender = &TxnCoordSender{}
+
 // TxnMetrics holds all metrics relating to KV transactions.
 type TxnMetrics struct {
 	Aborts *metric.CounterWithRates
@@ -183,16 +189,8 @@ func MakeTxnMetrics(histogramWindow time.Duration) TxnMetrics {
 	}
 }

-// A TxnCoordSender is an implementation of client.Sender which
-// wraps a lower-level Sender (either a storage.Stores or a DistSender)
-// to which it sends commands. It acts as a man-in-the-middle,
-// coordinating transaction state for clients. After a transaction is
-// started, the TxnCoordSender starts asynchronously sending heartbeat
-// messages to that transaction's txn record, to keep it live. It also
-// keeps track of each written key or key range over the course of the
-// transaction. When the transaction is committed or aborted, it
-// clears accumulated write intents for the transaction.
-type TxnCoordSender struct {
+// TxnCoordSenderFactory implements client.TxnSenderFactory.
+type TxnCoordSenderFactory struct { log.AmbientContext st *cluster.Settings @@ -200,24 +198,21 @@ type TxnCoordSender struct { clock *hlc.Clock heartbeatInterval time.Duration clientTimeout time.Duration - txnMu struct { - syncutil.Mutex - txns map[uuid.UUID]*txnMetadata // txn key to metadata - } - linearizable bool // enables linearizable behavior - stopper *stop.Stopper - metrics TxnMetrics + linearizable bool // enables linearizable behavior + stopper *stop.Stopper + metrics TxnMetrics } -var _ client.SenderWithDistSQLBackdoor = &TxnCoordSender{} +var _ client.TxnSenderFactory = &TxnCoordSenderFactory{} const defaultClientTimeout = 10 * time.Second -// NewTxnCoordSender creates a new TxnCoordSender for use from a KV -// distributed DB instance. -// ctx is the base context and is used for logs and traces when there isn't a -// more specific context available; it must have a Tracer set. -func NewTxnCoordSender( +// NewTxnCoordSenderFactory creates a new TxnCoordSenderFactory. The +// factory creates new instances of TxnCoordSenders. +// +// TODO(spencer): move these settings into a configuration object and +// supply that to each sender. +func NewTxnCoordSenderFactory( ambient log.AmbientContext, st *cluster.Settings, wrapped client.Sender, @@ -225,8 +220,8 @@ func NewTxnCoordSender( linearizable bool, stopper *stop.Stopper, txnMetrics TxnMetrics, -) *TxnCoordSender { - tc := &TxnCoordSender{ +) *TxnCoordSenderFactory { + return &TxnCoordSenderFactory{ AmbientContext: ambient, st: st, wrapped: wrapped, @@ -237,83 +232,51 @@ func NewTxnCoordSender( stopper: stopper, metrics: txnMetrics, } - tc.txnMu.txns = map[uuid.UUID]*txnMetadata{} - - ctx := tc.AnnotateCtx(context.Background()) - tc.stopper.RunWorker(ctx, func(ctx context.Context) { - tc.printStatsLoop(ctx) - }) - return tc } -// printStatsLoop blocks and periodically logs transaction statistics -// (throughput, success rates, durations, ...). Note that this only captures -// write txns, since read-only txns are stateless as far as TxnCoordSender is -// concerned. stats). -func (tc *TxnCoordSender) printStatsLoop(ctx context.Context) { - res := time.Millisecond // for duration logging resolution - var statusLogTimer timeutil.Timer - defer statusLogTimer.Stop() - scale := metric.Scale1M - for { - statusLogTimer.Reset(statusLogInterval) - select { - case <-statusLogTimer.C: - statusLogTimer.Read = true - // Take a snapshot of metrics. There's some chance of skew, since the snapshots are - // not done atomically, but that should be fine for these debug stats. - metrics := tc.metrics - durations, durationsWindow := metrics.Durations.Windowed() - restarts, restartsWindow := metrics.Restarts.Windowed() - if restartsWindow != durationsWindow { - log.Fatalf(ctx, - "misconfigured windowed histograms: %s != %s", - restartsWindow, - durationsWindow, - ) - } - commitRate := metrics.Commits.Rates[scale].Value() - commit1PCRate := metrics.Commits1PC.Rates[scale].Value() - abortRate := metrics.Aborts.Rates[scale].Value() - abandonRate := metrics.Abandons.Rates[scale].Value() - - // Show transaction stats over the last minute. Maybe this should - // be shorter in the future. We'll revisit if we get sufficient - // feedback. 
-			totalRate := commitRate + abortRate + abandonRate
-			var pCommitted, pCommitted1PC, pAbandoned, pAborted float64
-			if totalRate > 0 {
-				pCommitted = 100 * (commitRate / totalRate)
-				pCommitted1PC = 100 * (commit1PCRate / totalRate)
-				pAborted = 100 * (abortRate / totalRate)
-				pAbandoned = 100 * (abandonRate / totalRate)
-			}
+// New is part of the client.TxnSenderFactory interface.
+func (tcf *TxnCoordSenderFactory) New(typ client.TxnType) client.TxnSender {
+	return &TxnCoordSender{
+		typ: typ,
+		TxnCoordSenderFactory: tcf,
+	}
+}

-			dMean := durations.Mean()
-			dDev := durations.StdDev()
-			dMax := durations.Max()
-			rMean := restarts.Mean()
-			rDev := restarts.StdDev()
-			rMax := restarts.Max()
-			num := durations.TotalCount()
+// GetMeta is part of the client.TxnSender interface.
+func (tc *TxnCoordSender) GetMeta() roachpb.TxnCoordMeta {
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+	// Copy mutable state so access is safe for the caller.
+	meta := tc.mu.meta
+	meta.Txn = tc.mu.meta.Txn.Clone()
+	meta.Intents = append([]roachpb.Span(nil), tc.mu.meta.Intents...)
+	return meta
+}

-			// We could skip calculating everything if !log.V(1) but we want to make
-			// sure the code above doesn't silently break.
-			if log.V(1) {
-				log.Infof(ctx,
-					"txn coordinator: %.2f txn/sec, %.2f/%.2f/%.2f/%.2f %%cmmt/cmmt1pc/abrt/abnd, "+
-						"%s/%s/%s avg/σ/max duration, %.1f/%.1f/%d avg/σ/max restarts (%d samples over %s)",
-					totalRate, pCommitted, pCommitted1PC, pAborted, pAbandoned,
-					duration.Truncate(time.Duration(dMean), res),
-					duration.Truncate(time.Duration(dDev), res),
-					duration.Truncate(time.Duration(dMax), res),
-					rMean, rDev, rMax, num, restartsWindow,
-				)
-			}
-		case <-tc.stopper.ShouldStop():
-			return
-		}
+// AugmentMeta is part of the client.TxnSender interface.
+func (tc *TxnCoordSender) AugmentMeta(meta roachpb.TxnCoordMeta) {
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+	// Sanity check: don't combine if the meta is for a different txn ID.
+	if tc.mu.meta.Txn.ID != meta.Txn.ID {
+		return
 	}
+	tc.mu.meta.Txn.Update(&meta.Txn)
+	tc.mu.meta.Intents, _ = roachpb.MergeSpans(append(tc.mu.meta.Intents, meta.Intents...))
+	tc.mu.meta.CommandCount += meta.CommandCount
+
+	// Recompute the size of the intents.
+	tc.mu.intentsSize = 0
+	for _, i := range tc.mu.meta.Intents {
+		tc.mu.intentsSize += int64(len(i.Key) + len(i.EndKey))
+	}
+}
+
+// OnFinish is part of the client.TxnSender interface.
+func (tc *TxnCoordSender) OnFinish(onFinishFn func(error)) {
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+	tc.mu.onFinishFn = onFinishFn
 }

 // Send implements the batch.Sender interface. If the request is part of a
@@ -375,30 +338,32 @@ func (tc *TxnCoordSender) Send(
 	}

 	if pErr := func() *roachpb.Error {
-		tc.txnMu.Lock()
-		defer tc.txnMu.Unlock()
-		if pErr := tc.maybeRejectClientLocked(ctx, *ba.Txn); pErr != nil {
-			return pErr
+		tc.mu.Lock()
+		defer tc.mu.Unlock()
+		if ba.Txn.Writing {
+			if pErr := tc.maybeRejectClientLocked(ctx, ba.Txn.ID); pErr != nil {
+				return pErr
+			}
 		}
+		tc.mu.meta.CommandCount += int32(len(ba.Requests))

 		if !hasET {
 			return nil
 		}
 		// Everything below is carried out only when trying to finish a txn.
+		if tc.typ == client.LeafTxn {
+			return roachpb.NewErrorf("cannot commit on a leaf transaction coordinator")
+		}

 		// Populate et.IntentSpans, taking into account both any existing
 		// and new writes, and taking care to perform proper deduplication.
- txnMeta := tc.txnMu.txns[txnID] - distinctSpans := true - var keysSize int64 - if txnMeta != nil { - et.IntentSpans = txnMeta.keys - keysSize = txnMeta.keysSize - // Defensively set distinctSpans to false if we had any previous - // writes in this transaction. This effectively limits the distinct - // spans optimization to 1pc transactions. - distinctSpans = len(txnMeta.keys) == 0 - } + et.IntentSpans = tc.mu.meta.Intents + intentsSize := tc.mu.intentsSize + // Defensively set distinctSpans to false if we had any previous + // writes in this transaction. This effectively limits the distinct + // spans optimization to 1pc transactions. + distinctSpans := len(tc.mu.meta.Intents) == 0 + // We can't pass in a batch response here to better limit the key // spans as we don't know what is going to be affected. This will // affect queries such as `DELETE FROM my.table LIMIT 10` when @@ -409,15 +374,15 @@ func (tc *TxnCoordSender) Send( Key: key, EndKey: endKey, }) - keysSize += int64(len(key) + len(endKey)) + intentsSize += int64(len(key) + len(endKey)) }) // The request might already be used by an outgoing goroutine, // so we can't safely mutate anything in-place (as // maybeCondenseIntentSpans and MergeSpans do). et.IntentSpans = append([]roachpb.Span(nil), et.IntentSpans...) var err error - if et.IntentSpans, keysSize, err = tc.maybeCondenseIntentSpans( - ctx, et.IntentSpans, keysSize, + if et.IntentSpans, intentsSize, err = tc.maybeCondenseIntentSpans( + ctx, et.IntentSpans, intentsSize, ); err != nil { return roachpb.NewError(err) } @@ -432,10 +397,9 @@ func (tc *TxnCoordSender) Send( // in the client. return roachpb.NewErrorf("cannot commit a read-only transaction") } - if txnMeta != nil { - txnMeta.keys = et.IntentSpans - txnMeta.keysSize = keysSize - } + tc.mu.meta.Intents = et.IntentSpans + tc.mu.intentsSize = intentsSize + return nil }(); pErr != nil { return nil, pErr @@ -497,9 +461,10 @@ func (tc *TxnCoordSender) Send( }() } if br.Txn.Status != roachpb.PENDING { - tc.txnMu.Lock() - tc.cleanupTxnLocked(ctx, *br.Txn) - tc.txnMu.Unlock() + tc.mu.Lock() + tc.mu.meta.Txn = br.Txn.Clone() + tc.cleanupTxnLocked(ctx, done) + tc.mu.Unlock() } return br, nil } @@ -586,29 +551,18 @@ func (tc *TxnCoordSender) maybeCondenseIntentSpans( // state that prevents it from continuing, such as the coordinator having // considered the client abandoned, or a heartbeat having reported an error. func (tc *TxnCoordSender) maybeRejectClientLocked( - ctx context.Context, txn roachpb.Transaction, + ctx context.Context, txnID uuid.UUID, ) *roachpb.Error { - - if !txn.Writing { - return nil - } - txnMeta, ok := tc.txnMu.txns[txn.ID] // Check whether the transaction is still tracked and has a chance of // completing. It's possible that the coordinator learns about the // transaction having terminated from a heartbeat, and GC queue correctness // (along with common sense) mandates that we don't let the client // continue. switch { - case !ok: - log.VEventf(ctx, 2, "rejecting unknown txn: %s", txn.ID) - // TODO(spencerkimball): Could add coordinator node ID to the - // transaction session so that we can definitively return the right - // error between these possible errors. Or update the code to make an - // educated guess based on the incoming transaction timestamp. 
-		return roachpb.NewError(&roachpb.UntrackedTxnError{})
-	case txnMeta.txn.Status == roachpb.ABORTED:
-		tc.cleanupTxnLocked(ctx, txnMeta.txn)
-		abortedErr := roachpb.NewErrorWithTxn(roachpb.NewTransactionAbortedError(), &txnMeta.txn)
+	case tc.mu.state == aborted:
+		fallthrough
+	case tc.mu.meta.Txn.Status == roachpb.ABORTED:
+		abortedErr := roachpb.NewErrorWithTxn(roachpb.NewTransactionAbortedError(), &tc.mu.meta.Txn)
 		// TODO(andrei): figure out a UserPriority to use here.
 		newTxn := roachpb.PrepareTransactionForRetry(
 			ctx, abortedErr,
@@ -616,11 +570,12 @@ func (tc *TxnCoordSender) maybeRejectClientLocked(
 			roachpb.NormalUserPriority,
 			tc.clock)
 		return roachpb.NewError(roachpb.NewHandledRetryableTxnError(
-			abortedErr.Message, txn.ID, newTxn))
-	case txnMeta.txn.Status == roachpb.COMMITTED:
-		tc.cleanupTxnLocked(ctx, txnMeta.txn)
+			abortedErr.Message, txnID, newTxn))
+
+	case tc.mu.meta.Txn.Status == roachpb.COMMITTED:
 		return roachpb.NewErrorWithTxn(roachpb.NewTransactionStatusError(
-			"transaction is already committed"), &txnMeta.txn)
+			"transaction is already committed"), &tc.mu.meta.Txn)
+
 	default:
 		return nil
 	}
@@ -656,53 +611,40 @@ func (tc *TxnCoordSender) validateTxnForBatch(ctx context.Context, ba *roachpb.B
 	return nil
 }

-// cleanupTxnLocked is called when a transaction ends. The transaction record is
-// updated and the heartbeat goroutine signaled to clean up the transaction
-// gracefully.
-func (tc *TxnCoordSender) cleanupTxnLocked(ctx context.Context, txn roachpb.Transaction) {
-	log.Event(ctx, "coordinator stops")
-	txnMeta, ok := tc.txnMu.txns[txn.ID]
+// cleanupTxnLocked is called when a transaction ends. The heartbeat
+// goroutine is signaled to clean up the transaction gracefully.
+func (tc *TxnCoordSender) cleanupTxnLocked(ctx context.Context, state txnCoordState) {
+	tc.mu.state = state
+	if tc.mu.onFinishFn != nil {
+		tc.mu.onFinishFn(tc.maybeRejectClientLocked(ctx, tc.mu.meta.Txn.ID).GetDetail())
+	}
+	tc.mu.meta.Intents = nil
+	tc.mu.intentsSize = 0
+
+	// The heartbeat might've already removed the record. Or we may have already
 	// closed txnEnd but we are racing with the heartbeat cleanup.
-	if !ok || txnMeta.txnEnd == nil {
+	if tc.mu.txnEnd == nil {
 		return
 	}
-
-	// The supplied txn may be newer than the one in txnMeta, which is relevant
-	// for stats. We clone the txn before storing it, as the caller might not
-	// have provided a deep-copy, and we don't want to share Transactions in the
-	// TxnCoordSender's map with anyone.
-	txnMeta.txn = txn.Clone()
 	// Trigger heartbeat shutdown.
-	close(txnMeta.txnEnd)
-	txnMeta.txnEnd = nil
+	log.Event(ctx, "coordinator stops")
+	close(tc.mu.txnEnd)
+	tc.mu.txnEnd = nil
 }

-// unregisterTxn deletes a txnMetadata object from the sender
-// and collects its stats. It assumes the lock is held. Returns
+// finalTxnStatsLocked collects a transaction's final statistics. Returns
 // the duration, restarts, and finalized txn status.
-func (tc *TxnCoordSender) unregisterTxnLocked( - txnID uuid.UUID, -) (duration, restarts int64, status roachpb.TransactionStatus) { - txnMeta := tc.txnMu.txns[txnID] // guaranteed to exist - if txnMeta == nil { - panic(fmt.Sprintf("attempt to unregister non-existent transaction: %s", txnID)) - } - duration = tc.clock.PhysicalNow() - txnMeta.firstUpdateNanos - restarts = int64(txnMeta.txn.Epoch) - status = txnMeta.txn.Status - - txnMeta.keys = nil - - delete(tc.txnMu.txns, txnID) - +func (tc *TxnCoordSender) finalTxnStatsLocked() (duration, restarts int64, status roachpb.TransactionStatus) { + duration = tc.clock.PhysicalNow() - tc.mu.firstUpdateNanos + restarts = int64(tc.mu.meta.Txn.Epoch) + status = tc.mu.meta.Txn.Status return duration, restarts, status } // heartbeatLoop periodically sends a HeartbeatTxn RPC to an extant transaction, // stopping in the event the transaction is aborted or committed after // attempting to resolve the intents. When the heartbeat stops, the transaction -// is unregistered from the coordinator. +// stats are updated based on its final disposition. // // TODO(dan): The Context we use for this is currently the one from the first // request in a Txn, but the semantics of this aren't good. Each context has its @@ -710,19 +652,13 @@ func (tc *TxnCoordSender) unregisterTxnLocked( // that we pass the same one in every request, but it's brittle to rely on this // forever. // TODO(wiz): Update (*DBServer).Batch to not use context.TODO(). -func (tc *TxnCoordSender) heartbeatLoop(ctx context.Context, txnID uuid.UUID) { +func (tc *TxnCoordSender) heartbeatLoop(ctx context.Context) { var tickChan <-chan time.Time { ticker := time.NewTicker(tc.heartbeatInterval) tickChan = ticker.C defer ticker.Stop() } - defer func() { - tc.txnMu.Lock() - duration, restarts, status := tc.unregisterTxnLocked(txnID) - tc.txnMu.Unlock() - tc.updateStats(duration, restarts, status, false) - }() var closer <-chan struct{} // TODO(tschottdorf): this should join to the trace of the request @@ -731,11 +667,22 @@ func (tc *TxnCoordSender) heartbeatLoop(ctx context.Context, txnID uuid.UUID) { defer sp.Finish() ctx = opentracing.ContextWithSpan(ctx, sp) + defer func() { + tc.mu.Lock() + if tc.mu.txnEnd != nil { + close(tc.mu.txnEnd) + tc.mu.txnEnd = nil + } + duration, restarts, status := tc.finalTxnStatsLocked() + tc.mu.Unlock() + tc.updateStats(duration, restarts, status, false) + }() + { - tc.txnMu.Lock() - txnMeta := tc.txnMu.txns[txnID] // do not leak to outer scope - closer = txnMeta.txnEnd - tc.txnMu.Unlock() + tc.mu.Lock() + tc.mu.txnEnd = make(chan struct{}) + closer = tc.mu.txnEnd + tc.mu.Unlock() } if closer == nil { // Avoid race in which a Txn is cleaned up before the heartbeat @@ -746,7 +693,7 @@ func (tc *TxnCoordSender) heartbeatLoop(ctx context.Context, txnID uuid.UUID) { for { select { case <-tickChan: - if !tc.heartbeat(ctx, txnID) { + if !tc.heartbeat(ctx) { return } case <-closer: @@ -758,7 +705,8 @@ func (tc *TxnCoordSender) heartbeatLoop(ctx context.Context, txnID uuid.UUID) { // responsible for timing out transactions. If ctx.Done() is not nil, then // then heartbeat loop ignores the timeout check and this case is // responsible for client timeouts. 
- tc.tryAsyncAbort(txnID) + log.Eventf(ctx, "transaction heartbeat stopped: %s", ctx.Err()) + tc.tryAsyncAbort(ctx) return case <-tc.stopper.ShouldQuiesce(): return @@ -766,60 +714,64 @@ func (tc *TxnCoordSender) heartbeatLoop(ctx context.Context, txnID uuid.UUID) { } } -// tryAsyncAbort (synchronously) grabs a copy of the txn proto and the intents -// (which it then clears from txnMeta), and asynchronously tries to abort the -// transaction. -func (tc *TxnCoordSender) tryAsyncAbort(txnID uuid.UUID) { - tc.txnMu.Lock() - txnMeta := tc.txnMu.txns[txnID] +// tryAsyncAbort (synchronously) grabs a copy of the txn proto and the +// intents (which it then clears from meta), and asynchronously tries +// to abort the transaction. +func (tc *TxnCoordSender) tryAsyncAbort(ctx context.Context) { + tc.mu.Lock() + defer tc.mu.Unlock() // Clone the intents and the txn to avoid data races. - intentSpans, _ := roachpb.MergeSpans(append([]roachpb.Span(nil), txnMeta.keys...)) - txnMeta.keys = nil - txnMeta.keysSize = 0 - txn := txnMeta.txn.Clone() - tc.txnMu.Unlock() + intentSpans, _ := roachpb.MergeSpans(append([]roachpb.Span(nil), tc.mu.meta.Intents...)) + tc.cleanupTxnLocked(ctx, aborted) + txn := tc.mu.meta.Txn.Clone() // Since we don't hold the lock continuously, it's possible that two aborts // raced here. That's fine (and probably better than the alternative, which - // is missing new intents sometimes). - if txn.Status != roachpb.PENDING { + // is missing new intents sometimes). Note that the txn may be uninitialized + // here if a failure occurred before the first write succeeded. + if txn.Status != roachpb.PENDING || txn.ID == (uuid.UUID{}) { return } // NB: use context.Background() here because we may be called when the // caller's context has been canceled. - ctx := tc.AnnotateCtx(context.Background()) - if err := tc.stopper.RunAsyncTask(ctx, "kv.TxnCoordSender: aborting txn", func(ctx context.Context) { - ba := roachpb.BatchRequest{} - ba.Txn = &txn - - et := &roachpb.EndTransactionRequest{ - Span: roachpb.Span{ - Key: txn.Key, - }, - Commit: false, - IntentSpans: intentSpans, - } - ba.Add(et) - - // Use the wrapped sender since the normal Sender does not allow - // clients to specify intents. - if _, pErr := tc.wrapped.Send(ctx, ba); pErr != nil { - if log.V(1) { - log.Warningf(ctx, "abort due to inactivity failed for %s: %s ", txn, pErr) + if err := tc.stopper.RunAsyncTask( + tc.AnnotateCtx(context.Background()), "kv.TxnCoordSender: aborting txn", func(ctx context.Context) { + // Use the wrapped sender since the normal Sender does not allow + // clients to specify intents. 
+			resp, pErr := client.SendWrappedWith(
+				ctx, tc.wrapped, roachpb.Header{Txn: &txn}, &roachpb.EndTransactionRequest{
+					Span: roachpb.Span{
+						Key: txn.Key,
+					},
+					Commit: false,
+					IntentSpans: intentSpans,
+				},
+			)
+			tc.mu.Lock()
+			defer tc.mu.Unlock()
+			if pErr != nil {
+				if log.V(1) {
+					log.Warningf(ctx, "abort due to inactivity failed for %s: %s ", txn, pErr)
+				}
+				if errTxn := pErr.GetTxn(); errTxn != nil {
+					tc.mu.meta.Txn.Update(errTxn)
+				}
+			} else {
+				tc.mu.meta.Txn.Update(resp.(*roachpb.EndTransactionResponse).Txn)
 			}
-		}
-	}); err != nil {
+		},
+	); err != nil {
 		log.Warning(ctx, err)
 	}
 }

-func (tc *TxnCoordSender) heartbeat(ctx context.Context, txnID uuid.UUID) bool {
-	tc.txnMu.Lock()
-	txnMeta := tc.txnMu.txns[txnID]
-	txn := txnMeta.txn.Clone()
-	hasAbandoned := txnMeta.hasClientAbandonedCoord(tc.clock.PhysicalNow())
-	tc.txnMu.Unlock()
+func (tc *TxnCoordSender) heartbeat(ctx context.Context) bool {
+	tc.mu.Lock()
+	txn := tc.mu.meta.Txn.Clone()
+	timeout := tc.clock.PhysicalNow() - tc.clientTimeout.Nanoseconds()
+	hasAbandoned := tc.mu.lastUpdateNanos < timeout
+	tc.mu.Unlock()

 	if txn.Status != roachpb.PENDING {
 		// A previous iteration has already determined that the transaction is
@@ -850,9 +802,10 @@ func (tc *TxnCoordSender) heartbeat(ctx context.Context, txnID uuid.UUID) bool {
 	// done with a transaction.
 	if ctx.Done() == nil && hasAbandoned {
 		if log.V(1) {
-			log.Infof(ctx, "transaction %s abandoned; stopping heartbeat", txnMeta.txn)
+			log.Infof(ctx, "transaction %s abandoned; stopping heartbeat", txn)
 		}
-		tc.tryAsyncAbort(txnID)
+		log.Eventf(ctx, "transaction abandoned; heartbeat stopped")
+		tc.tryAsyncAbort(ctx)
 		return false
 	}

@@ -870,26 +823,33 @@ func (tc *TxnCoordSender) heartbeat(ctx context.Context, txnID uuid.UUID) bool {

 	// Correctness mandates that when we can't heartbeat the transaction, we
 	// make sure the client doesn't keep going. This is particularly relevant
-	// in the case of an ABORTED transaction, but if we can't reach the
-	// transaction record at all, we're going to have to assume we're aborted
-	// as well.
+	// in the case of an ABORTED transaction, but even if we can't reach the
+	// transaction record at all, we have to assume it's been aborted as well.
 	if pErr != nil {
-		log.Warningf(ctx, "heartbeat to %s failed: %s", txn, pErr)
+		log.Eventf(ctx, "heartbeat failed to %s: %s", txn, pErr)
+		if errTxn := pErr.GetTxn(); errTxn != nil {
+			tc.mu.Lock()
+			tc.mu.meta.Txn.Update(errTxn)
+			tc.mu.Unlock()
+		}
 		// We're not going to let the client carry out additional requests, so
-		// try to clean up.
-		tc.tryAsyncAbort(txn.ID)
-		txn.Status = roachpb.ABORTED
-	} else {
-		txn.Update(br.Responses[0].GetInner().(*roachpb.HeartbeatTxnResponse).Txn)
+		// try to clean up if the known txn disposition remains PENDING.
+		if txn.Status == roachpb.PENDING {
+			log.Eventf(ctx, "transaction heartbeat failed: %s", pErr)
+			tc.tryAsyncAbort(ctx)
+		}
+		// Stop the heartbeat.
+		return false
 	}
+	txn.Update(br.Responses[0].GetInner().(*roachpb.HeartbeatTxnResponse).Txn)

 	// Give the news to the txn in the txns map. This will update long-running
 	// transactions (which may find out that they have to restart in that way),
 	// but in particular makes sure that they notice when they've been aborted
 	// (in which case we'll give them an error on their next request).
-	tc.txnMu.Lock()
-	tc.txnMu.txns[txnID].txn.Update(&txn)
-	tc.txnMu.Unlock()
+	tc.mu.Lock()
+	tc.mu.meta.Txn.Update(&txn)
+	tc.mu.Unlock()

 	return true
 }
@@ -899,10 +859,10 @@ func (tc *TxnCoordSender) heartbeat(ctx context.Context, txnID uuid.UUID) bool {
 // object when adequate. It also updates retryable errors with the
 // updated transaction for use by client restarts.
 //
-// startNS is the time when the request that's updating the state has been sent.
-// This is not used if the request is known to not be the one in charge of
-// starting tracking the transaction - i.e. this is the case for DistSQL, which
-// just does reads and passes 0.
+// startNS is the time when the request that's updating the state has
+// been sent. This is not used if the request is known to not be the
+// one in charge of starting tracking the transaction - i.e. this is
+// the case for DistSQL, which just does reads and passes 0.
 func (tc *TxnCoordSender) updateState(
 	ctx context.Context,
 	startNS int64,
@@ -911,9 +871,6 @@ func (tc *TxnCoordSender) updateState(
 	pErr *roachpb.Error,
 ) *roachpb.Error {

-	tc.txnMu.Lock()
-	defer tc.txnMu.Unlock()
-
 	if ba.Txn == nil {
 		// Not a transactional request.
 		return pErr
@@ -951,11 +908,16 @@ func (tc *TxnCoordSender) updateState(
 		}
 		newTxn = roachpb.PrepareTransactionForRetry(ctx, pErr, ba.UserPriority, tc.clock)
+		// Reset the statement count as this is a retryable txn error.
+		tc.mu.meta.CommandCount = 0
+
+		// If the ID changed, it means we had to start a new transaction
+		// and the old one is toast. Try an asynchronous abort of the
+		// prior transaction to clean up its intents immediately, which
+		// likely will otherwise require synchronous cleanup by the
+		// restarted transaction.
 		if errTxnID != newTxn.ID {
-			// If the ID changed, it means we had to start a new transaction and the
-			// old one is toast. Clean up the freshly aborted transaction in
-			// defer(), avoiding a race with the state update below.
-			defer tc.cleanupTxnLocked(ctx, *ba.Txn)
+			tc.tryAsyncAbort(ctx)
 		}
 		// Pass a HandledRetryableTxnError up to the next layer.
 		pErr = roachpb.NewError(
@@ -981,7 +943,9 @@ func (tc *TxnCoordSender) updateState(
 		}
 	}

-	txnMeta := tc.txnMu.txns[txnID]
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+
 	// For successful transactional requests, keep the written intents and
 	// the updated transaction record to be sent along with the reply.
 	// The transaction metadata is created with the first writing operation.
@@ -1006,59 +970,49 @@ func (tc *TxnCoordSender) updateState(
 	// This is ok, since the following block will be a no-op if the batch
 	// contained no transactional write requests.
 	_, ambiguousErr := pErr.GetDetail().(*roachpb.AmbiguousResultError)
-	if txnMeta != nil || pErr == nil || ambiguousErr || newTxn.Writing {
+	if pErr == nil || ambiguousErr || newTxn.Writing {
 		// Adding the intents even on error reduces the likelihood of dangling
 		// intents blocking concurrent writers for extended periods of time.
 		// See #3346.
- var keys []roachpb.Span - var keysSize int64 - if txnMeta != nil { - keys = txnMeta.keys - keysSize = txnMeta.keysSize - } + intents := tc.mu.meta.Intents + intentsSize := tc.mu.intentsSize ba.IntentSpanIterate(br, func(key, endKey roachpb.Key) { - keys = append(keys, roachpb.Span{Key: key, EndKey: endKey}) - keysSize += int64(len(key) + len(endKey)) + intents = append(intents, roachpb.Span{Key: key, EndKey: endKey}) + intentsSize += int64(len(key) + len(endKey)) }) - if condensedKeys, condensedKeysSize, err := tc.maybeCondenseIntentSpans(ctx, keys, keysSize); err != nil { + if condensedIntents, condensedIntentsSize, err := tc.maybeCondenseIntentSpans(ctx, intents, intentsSize); err != nil { log.ErrEventf(ctx, "failed to condense intent spans (%s); skipping", err) } else { - keys, keysSize = condensedKeys, condensedKeysSize + intents, intentsSize = condensedIntents, condensedIntentsSize } - if txnMeta != nil { - txnMeta.keys = keys - txnMeta.keysSize = keysSize - } else if len(keys) > 0 { + // Initialize the first update time and maybe start the heartbeat. + if tc.mu.firstUpdateNanos == 0 && len(intents) > 0 { // If the transaction is already over, there's no point in - // launching a one-off coordinator which will shut down right + // launching a one-off heartbeat which will shut down right // away. If we ended up here with an error, we'll always start // the coordinator - the transaction has laid down intents, so // we expect it to be committed/aborted at some point in the // future. if _, isEnding := ba.GetArg(roachpb.EndTransaction); pErr != nil || !isEnding { log.Event(ctx, "coordinator spawns") - txnMeta = &txnMetadata{ - txn: newTxn, - keys: keys, - keysSize: keysSize, - firstUpdateNanos: startNS, - lastUpdateNanos: tc.clock.PhysicalNow(), - timeoutDuration: tc.clientTimeout, - txnEnd: make(chan struct{}), - } - tc.txnMu.txns[txnID] = txnMeta - - if err := tc.stopper.RunAsyncTask( - ctx, "kv.TxnCoordSender: heartbeat loop", func(ctx context.Context) { - tc.heartbeatLoop(ctx, txnID) - }); err != nil { - // The system is already draining and we can't start the - // heartbeat. We refuse new transactions for now because - // they're likely not going to have all intents committed. - // In principle, we can relax this as needed though. - tc.unregisterTxnLocked(txnID) - return roachpb.NewError(err) + tc.mu.firstUpdateNanos = startNS + + // Only heartbeat the txn record if we're the root transaction. + if tc.typ == client.RootTxn { + if err := tc.stopper.RunAsyncTask( + ctx, "kv.TxnCoordSender: heartbeat loop", func(ctx context.Context) { + tc.heartbeatLoop(ctx) + }); err != nil { + // The system is already draining and we can't start the + // heartbeat. We refuse new transactions for now because + // they're likely not going to have all intents committed. + // In principle, we can relax this as needed though. + tc.cleanupTxnLocked(ctx, aborted) + duration, restarts, status := tc.finalTxnStatsLocked() + tc.updateStats(duration, restarts, status, false) + return roachpb.NewError(err) + } } } else { // If this was a successful one phase commit, update stats @@ -1068,27 +1022,17 @@ func (tc *TxnCoordSender) updateState( tc.updateStats(tc.clock.PhysicalNow()-startNS, 0, newTxn.Status, ok && etArgs.OnePhaseCommit) } } + tc.mu.meta.Intents = intents + tc.mu.intentsSize = intentsSize } // Update our record of this transaction, even on error. 
- if txnMeta != nil { - txnMeta.txn.Update(&newTxn) - txnMeta.setLastUpdate(tc.clock.PhysicalNow()) - } + tc.mu.meta.Txn.Update(&newTxn) + tc.mu.lastUpdateNanos = tc.clock.PhysicalNow() return pErr } -// GetTxnState is part of the SenderWithDistSQLBackdoor interface. -func (tc *TxnCoordSender) GetTxnState(txnID uuid.UUID) (roachpb.Transaction, bool) { - tc.txnMu.Lock() - defer tc.txnMu.Unlock() - if txnMeta, ok := tc.txnMu.txns[txnID]; ok { - return txnMeta.txn, true - } - return roachpb.Transaction{}, false -} - // TODO(tschottdorf): this method is somewhat awkward but unless we want to // give this error back to the client, our options are limited. We'll have to // run the whole thing for them, or any restart will still end up at the client @@ -1104,7 +1048,7 @@ func (tc *TxnCoordSender) resendWithTxn( // through here. dbCtx := client.DefaultDBContext() dbCtx.UserPriority = ba.UserPriority - tmpDB := client.NewDBWithContext(tc, tc.clock, dbCtx) + tmpDB := client.NewDBWithContext(tc.TxnCoordSenderFactory, tc.clock, dbCtx) var br *roachpb.BatchResponse err := tmpDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { txn.SetDebugName("auto-wrap") diff --git a/pkg/kv/txn_coord_sender_test.go b/pkg/kv/txn_coord_sender_test.go index de50c09f5f46..9f93903139f6 100644 --- a/pkg/kv/txn_coord_sender_test.go +++ b/pkg/kv/txn_coord_sender_test.go @@ -20,6 +20,7 @@ import ( "fmt" "reflect" "strconv" + "sync/atomic" "testing" "time" @@ -45,38 +46,36 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/uuid" ) -// teardownHeartbeats goes through the coordinator's active transactions and -// has the associated heartbeat tasks quit. This is useful for tests which -// don't finish transactions. This is safe to call multiple times. -func teardownHeartbeats(tc *TxnCoordSender) { +// teardownHeartbeat shuts down the coordinator's heartbeat task, if +// it's not already finished. This is useful for tests which don't +// finish transactions. This is safe to call multiple times. +func teardownHeartbeat(tc *TxnCoordSender) { if r := recover(); r != nil { panic(r) } - tc.txnMu.Lock() - defer tc.txnMu.Unlock() - for _, tm := range tc.txnMu.txns { - if tm.txnEnd != nil { - close(tm.txnEnd) - tm.txnEnd = nil - } + tc.mu.Lock() + defer tc.mu.Unlock() + if tc.mu.txnEnd != nil { + close(tc.mu.txnEnd) + tc.mu.txnEnd = nil } } // createTestDB creates a local test server and starts it. The caller // is responsible for stopping the test server. -func createTestDB(t testing.TB) (*localtestcluster.LocalTestCluster, *TxnCoordSender) { +func createTestDB(t testing.TB) *localtestcluster.LocalTestCluster { return createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), nil) } func createTestDBWithContextAndKnobs( t testing.TB, dbCtx client.DBContext, knobs *storage.StoreTestingKnobs, -) (*localtestcluster.LocalTestCluster, *TxnCoordSender) { +) *localtestcluster.LocalTestCluster { s := &localtestcluster.LocalTestCluster{ DBContext: &dbCtx, StoreTestingKnobs: knobs, } - s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster) - return s, s.Sender.(*TxnCoordSender) + s.Start(t, testutils.NewNodeTestBaseContext(), InitFactoryForLocalTestCluster) + return s } // makeTS creates a new timestamp. @@ -92,43 +91,35 @@ func makeTS(walltime int64, logical int32) hlc.Timestamp { // transaction ID updates the last update timestamp. 
func TestTxnCoordSenderAddRequest(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() - defer teardownHeartbeats(sender) - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) + tc := txn.Sender().(*TxnCoordSender) + defer teardownHeartbeat(tc) // Put request will create a new transaction. if err := txn.Put(context.TODO(), roachpb.Key("a"), []byte("value")); err != nil { t.Fatal(err) } - txnID := txn.Proto().ID - txnMeta, ok := sender.txnMu.txns[txnID] - if !ok { - t.Fatal("expected a transaction to be created on coordinator") - } if !txn.Proto().Writing { t.Fatal("txn is not marked as writing") } - ts := txnMeta.getLastUpdate() - - // Advance time and send another put request. Lock the coordinator - // to prevent a data race. - sender.txnMu.Lock() + tc.mu.Lock() s.Manual.Increment(1) - sender.txnMu.Unlock() + ts := tc.mu.lastUpdateNanos + tc.mu.Unlock() + if err := txn.Put(context.TODO(), roachpb.Key("a"), []byte("value")); err != nil { t.Fatal(err) } - if len(sender.txnMu.txns) != 1 { - t.Errorf("expected length of transactions map to be 1; got %d", len(sender.txnMu.txns)) - } - txnMeta = sender.txnMu.txns[txnID] - if lu := txnMeta.getLastUpdate(); ts >= lu { + tc.mu.Lock() + if lu := tc.mu.lastUpdateNanos; ts >= lu { t.Errorf("expected last update time to advance past %d; got %d", ts, lu) } else if un := s.Manual.UnixNano(); lu != un { t.Errorf("expected last update time to equal %d; got %d", un, lu) } + tc.mu.Unlock() } // TestTxnCoordSenderAddRequestConcurrently verifies adding concurrent requests @@ -136,16 +127,17 @@ func TestTxnCoordSenderAddRequest(t *testing.T) { // transaction ID updates the last update timestamp. func TestTxnCoordSenderAddRequestConcurrently(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() - defer teardownHeartbeats(sender) - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) + tc := txn.Sender().(*TxnCoordSender) + defer teardownHeartbeat(tc) // Put requests will create a new transaction. sendRequests := func() error { // NB: we can't use errgroup.WithContext here because s.DB uses the first - // request's context (if it's cancellable) to determine when the + // request's context (if it's cancelable) to determine when the // transaction is abandoned. Since errgroup.Group.Wait cancels its // context, this would cause the transaction to have been aborted when // this function is called for the second time. See this TODO from @@ -168,44 +160,35 @@ func TestTxnCoordSenderAddRequestConcurrently(t *testing.T) { if err := sendRequests(); err != nil { t.Fatal(err) } - txnID := txn.Proto().ID - txnMeta, ok := sender.txnMu.txns[txnID] - if !ok { - t.Fatal("expected a transaction to be created on coordinator") - } if !txn.Proto().Writing { t.Fatal("txn is not marked as writing") } - ts := txnMeta.getLastUpdate() - - // Advance time and send more put requests. Lock the coordinator - // to prevent a data race. 
- sender.txnMu.Lock() + tc.mu.Lock() + ts := tc.mu.lastUpdateNanos s.Manual.Increment(1) - sender.txnMu.Unlock() + tc.mu.Unlock() + if err := sendRequests(); err != nil { t.Fatal(err) } - if len(sender.txnMu.txns) != 1 { - t.Errorf("expected length of transactions map to be 1; got %d", len(sender.txnMu.txns)) - } - txnMeta = sender.txnMu.txns[txnID] - if lu := txnMeta.getLastUpdate(); ts >= lu { + + tc.mu.Lock() + if lu := tc.mu.lastUpdateNanos; ts >= lu { t.Errorf("expected last update time to advance past %d; got %d", ts, lu) } else if un := s.Manual.UnixNano(); lu != un { t.Errorf("expected last update time to equal %d; got %d", un, lu) } + tc.mu.Unlock() } // TestTxnCoordSenderBeginTransaction verifies that a command sent with a // not-nil Txn with empty ID gets a new transaction initialized. func TestTxnCoordSenderBeginTransaction(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() - defer teardownHeartbeats(sender) - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) // Put request will create a new transaction. key := roachpb.Key("key") @@ -235,11 +218,10 @@ func TestTxnCoordSenderBeginTransaction(t *testing.T) { // a new transaction, a non-zero priority is treated as a minimum value. func TestTxnCoordSenderBeginTransactionMinPriority(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() - defer teardownHeartbeats(sender) - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) // Put request will create a new transaction. key := roachpb.Key("key") @@ -272,11 +254,13 @@ func TestTxnCoordSenderKeyRanges(t *testing.T) { {roachpb.Key("b"), roachpb.Key("c")}, } - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() - defer teardownHeartbeats(sender) - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) + tc := txn.Sender().(*TxnCoordSender) + defer teardownHeartbeat(tc) + for _, rng := range ranges { if rng.end != nil { if err := txn.DelRange(context.TODO(), rng.start, rng.end); err != nil { @@ -289,17 +273,13 @@ func TestTxnCoordSenderKeyRanges(t *testing.T) { } } - txnID := txn.Proto().ID - // Verify that the transaction metadata contains only two entries - // in its "keys" range group. "a" and range "aa"-"c". - txnMeta, ok := sender.txnMu.txns[txnID] - if !ok { - t.Fatalf("expected a transaction to be created on coordinator") - } - keys, _ := roachpb.MergeSpans(txnMeta.keys) - if len(keys) != 2 { - t.Errorf("expected 2 entries in keys range group; got %v", keys) + // in its intents slice. "a" and range "aa"-"c". 
+ tc.mu.Lock() + intents, _ := roachpb.MergeSpans(tc.mu.meta.Intents) + tc.mu.Unlock() + if len(intents) != 2 { + t.Errorf("expected 2 entries in keys range group; got %v", intents) } } @@ -320,21 +300,21 @@ func TestTxnCoordSenderCondenseIntentSpans(t *testing.T) { g0Tog1 := roachpb.Span{Key: roachpb.Key("g0"), EndKey: roachpb.Key("g1")} fTog1Closed := roachpb.Span{Key: roachpb.Key("f"), EndKey: roachpb.Key("g1")} testCases := []struct { - span roachpb.Span - expKeys []roachpb.Span - expKeysSize int64 + span roachpb.Span + expIntents []roachpb.Span + expIntentsSize int64 }{ - {span: a, expKeys: []roachpb.Span{a}, expKeysSize: 1}, - {span: b, expKeys: []roachpb.Span{a, b}, expKeysSize: 2}, - {span: c, expKeys: []roachpb.Span{a, b, c}, expKeysSize: 3}, - {span: d, expKeys: []roachpb.Span{a, b, c, d}, expKeysSize: 9}, + {span: a, expIntents: []roachpb.Span{a}, expIntentsSize: 1}, + {span: b, expIntents: []roachpb.Span{a, b}, expIntentsSize: 2}, + {span: c, expIntents: []roachpb.Span{a, b, c}, expIntentsSize: 3}, + {span: d, expIntents: []roachpb.Span{a, b, c, d}, expIntentsSize: 9}, // Note that c-e condenses and then lists first. - {span: e, expKeys: []roachpb.Span{cToEClosed, a, b}, expKeysSize: 5}, - {span: fTof0, expKeys: []roachpb.Span{cToEClosed, a, b, fTof0}, expKeysSize: 8}, - {span: g, expKeys: []roachpb.Span{cToEClosed, a, b, fTof0, g}, expKeysSize: 9}, - {span: g0Tog1, expKeys: []roachpb.Span{fTog1Closed, cToEClosed, aToBClosed}, expKeysSize: 9}, + {span: e, expIntents: []roachpb.Span{cToEClosed, a, b}, expIntentsSize: 5}, + {span: fTof0, expIntents: []roachpb.Span{cToEClosed, a, b, fTof0}, expIntentsSize: 8}, + {span: g, expIntents: []roachpb.Span{cToEClosed, a, b, fTof0, g}, expIntentsSize: 9}, + {span: g0Tog1, expIntents: []roachpb.Span{fTog1Closed, cToEClosed, aToBClosed}, expIntentsSize: 9}, // Add a key in the middle of a span, which will get merged on commit. - {span: c, expKeys: []roachpb.Span{cToEClosed, aToBClosed, fTog1Closed}, expKeysSize: 9}, + {span: c, expIntents: []roachpb.Span{cToEClosed, aToBClosed, fTog1Closed}, expIntentsSize: 9}, } splits := []roachpb.Span{ {Key: roachpb.Key("a"), EndKey: roachpb.Key("c")}, @@ -351,11 +331,10 @@ func TestTxnCoordSenderCondenseIntentSpans(t *testing.T) { }) } descDB := mockRangeDescriptorDBForDescs(descs...) - s, txnCoord := createTestDB(t) + s := createTestDB(t) st := s.Store.ClusterSettings() maxTxnIntentsBytes.Override(&st.SV, 10) /* 10 bytes and it will condense */ defer s.Stop() - defer teardownHeartbeats(txnCoord) // Check end transaction intents, which should exclude the intent at // key "c" as it's merged with the cToEClosed span. 
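The two-entry assertion above leans on `roachpb.MergeSpans`, which sorts the accumulated spans and fuses overlapping or adjacent ones. A small illustration, with the two-value return as used in this diff (the sample spans are hypothetical):

```go
spans := []roachpb.Span{
	{Key: roachpb.Key("a")},                            // point write
	{Key: roachpb.Key("aa"), EndKey: roachpb.Key("b")}, // DelRange aa-b
	{Key: roachpb.Key("b"), EndKey: roachpb.Key("c")},  // DelRange b-c
}
merged, _ := roachpb.MergeSpans(spans)
// merged holds two entries: the point span "a", plus "aa"-"c" formed by
// fusing the two adjacent ranges, matching the test's expectation.
```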
@@ -378,10 +357,19 @@ func TestTxnCoordSenderCondenseIntentSpans(t *testing.T) { }, RangeDescriptorDB: descDB, } - txnCoord.wrapped = NewDistSender(cfg, s.Gossip) + ambient := log.AmbientContext{Tracer: tracing.NewTracer()} + tsf := NewTxnCoordSenderFactory( + ambient, + st, + NewDistSender(cfg, s.Gossip), + s.Clock, + false, /* linearizable */ + s.Stopper, + MakeTxnMetrics(metric.TestSampleInterval), + ) + db := client.NewDB(tsf, s.Clock) - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) - txnID := txn.Proto().ID + txn := client.NewTxn(db, 0 /* gatewayNodeID */, client.RootTxn) for i, tc := range testCases { if tc.span.EndKey != nil { if err := txn.DelRange(context.TODO(), tc.span.Key, tc.span.EndKey); err != nil { @@ -392,41 +380,17 @@ func TestTxnCoordSenderCondenseIntentSpans(t *testing.T) { t.Fatal(err) } } - txnCoord.txnMu.Lock() - txnMeta, ok := txnCoord.txnMu.txns[txnID] - if !ok { - t.Fatalf("expected a transaction to be created on coordinator") + meta := txn.GetTxnCoordMeta() + if a, e := meta.Intents, tc.expIntents; !reflect.DeepEqual(a, e) { + t.Errorf("%d: expected keys %+v; got %+v", i, e, a) } - if a, e := txnMeta.keysSize, tc.expKeysSize; a != e { - t.Errorf("%d: keys size expected %d; got %d", i, e, a) + intentsSize := int64(0) + for _, i := range meta.Intents { + intentsSize += int64(len(i.Key) + len(i.EndKey)) } - if a, e := txnMeta.keys, tc.expKeys; !reflect.DeepEqual(a, e) { - t.Errorf("%d: expected keys %+v; got %+v", i, e, a) + if a, e := intentsSize, tc.expIntentsSize; a != e { + t.Errorf("%d: keys size expected %d; got %d", i, e, a) } - txnCoord.txnMu.Unlock() - } -} - -// TestTxnCoordSenderMultipleTxns verifies correct operation with -// multiple outstanding transactions. -func TestTxnCoordSenderMultipleTxns(t *testing.T) { - defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) - defer s.Stop() - defer teardownHeartbeats(sender) - - txn1 := client.NewTxn(s.DB, 0 /* gatewayNodeID */) - txn2 := client.NewTxn(s.DB, 0 /* gatewayNodeID */) - - if err := txn1.Put(context.TODO(), roachpb.Key("a"), []byte("value")); err != nil { - t.Fatal(err) - } - if err := txn2.Put(context.TODO(), roachpb.Key("b"), []byte("value")); err != nil { - t.Fatal(err) - } - - if len(sender.txnMu.txns) != 2 { - t.Errorf("expected length of transactions map to be 2; got %d", len(sender.txnMu.txns)) } } @@ -434,14 +398,15 @@ func TestTxnCoordSenderMultipleTxns(t *testing.T) { // transaction record. func TestTxnCoordSenderHeartbeat(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() - defer teardownHeartbeats(sender) + initialTxn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) + tc := initialTxn.Sender().(*TxnCoordSender) + defer teardownHeartbeat(tc) // Set heartbeat interval to 1ms for testing. - sender.heartbeatInterval = 1 * time.Millisecond + tc.TxnCoordSenderFactory.heartbeatInterval = 1 * time.Millisecond - initialTxn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) if err := initialTxn.Put(context.TODO(), roachpb.Key("a"), []byte("value")); err != nil { t.Fatal(err) } @@ -450,15 +415,15 @@ func TestTxnCoordSenderHeartbeat(t *testing.T) { var heartbeatTS hlc.Timestamp for i := 0; i < 3; i++ { testutils.SucceedsSoon(t, func() error { - txn, pErr := getTxn(sender, initialTxn.Proto()) + txn, pErr := getTxn(s.DB, initialTxn.Proto()) if pErr != nil { t.Fatal(pErr) } // Advance clock by 1ns. // Locking the TxnCoordSender to prevent a data race. 
- sender.txnMu.Lock() + tc.mu.Lock() s.Manual.Increment(1) - sender.txnMu.Unlock() + tc.mu.Unlock() if lastActive := txn.LastActive(); heartbeatTS.Less(lastActive) { heartbeatTS = lastActive return nil @@ -475,17 +440,17 @@ func TestTxnCoordSenderHeartbeat(t *testing.T) { Span: roachpb.Span{Key: initialTxn.Proto().Key}, }) ba.Txn = initialTxn.Proto() - if _, pErr := sender.wrapped.Send(context.Background(), ba); pErr != nil { + if _, pErr := tc.TxnCoordSenderFactory.wrapped.Send(context.Background(), ba); pErr != nil { t.Fatal(pErr) } } + // Verify that the abort is discovered and the heartbeat discontinued. testutils.SucceedsSoon(t, func() error { - sender.txnMu.Lock() - defer sender.txnMu.Unlock() - if txnMeta, ok := sender.txnMu.txns[initialTxn.Proto().ID]; !ok { - t.Fatal("transaction unregistered prematurely") - } else if txnMeta.txn.Status != roachpb.ABORTED { + tc.mu.Lock() + done := tc.mu.txnEnd == nil + tc.mu.Unlock() + if !done { return fmt.Errorf("transaction is not aborted") } return nil @@ -497,15 +462,13 @@ func TestTxnCoordSenderHeartbeat(t *testing.T) { } // getTxn fetches the requested key and returns the transaction info. -func getTxn( - coord *TxnCoordSender, txn *roachpb.Transaction, -) (*roachpb.Transaction, *roachpb.Error) { +func getTxn(db *client.DB, txn *roachpb.Transaction) (*roachpb.Transaction, *roachpb.Error) { hb := &roachpb.HeartbeatTxnRequest{ Span: roachpb.Span{ Key: txn.Key, }, } - reply, pErr := client.SendWrappedWith(context.Background(), coord, roachpb.Header{ + reply, pErr := client.SendWrappedWith(context.Background(), db.GetSender(), roachpb.Header{ Txn: txn, }, hb) if pErr != nil { @@ -514,13 +477,15 @@ func getTxn( return reply.(*roachpb.HeartbeatTxnResponse).Txn, nil } -func verifyCleanup(key roachpb.Key, coord *TxnCoordSender, eng engine.Engine, t *testing.T) { +func verifyCleanup(key roachpb.Key, eng engine.Engine, t *testing.T, coords ...*TxnCoordSender) { testutils.SucceedsSoon(t, func() error { - coord.txnMu.Lock() - l := len(coord.txnMu.txns) - coord.txnMu.Unlock() - if l != 0 { - return fmt.Errorf("expected empty transactions map; got %d", l) + for _, coord := range coords { + coord.mu.Lock() + hb := coord.mu.txnEnd != nil + coord.mu.Unlock() + if hb { + return fmt.Errorf("expected no heartbeat") + } } meta := &enginepb.MVCCMetadata{} ok, _, _, err := eng.GetProto(engine.MakeMVCCMetadataKey(key), meta) @@ -539,13 +504,13 @@ func verifyCleanup(key roachpb.Key, coord *TxnCoordSender, eng engine.Engine, t // from the txns map. func TestTxnCoordSenderEndTxn(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() // 4 cases: no deadline, past deadline, equal deadline, future deadline. for i := 0; i < 4; i++ { key := roachpb.Key("key: " + strconv.Itoa(i)) - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) // Set to SNAPSHOT so that it can be pushed without restarting. if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil { t.Fatal(err) @@ -555,7 +520,7 @@ func TestTxnCoordSenderEndTxn(t *testing.T) { t.Fatal(pErr) } // Conflicting transaction that pushes the above transaction. 
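Note the shape of the abort check in the heartbeat test above: with no transaction registry left, "the coordinator noticed the abort" is observable only as the heartbeat loop shutting down, i.e. `tc.mu.txnEnd == nil`. The same polling idiom recurs in the GC and cleanup tests below; a sketch (`errors` here is `github.com/pkg/errors`, as imported by these tests):

```go
testutils.SucceedsSoon(t, func() error {
	tc.mu.Lock()
	done := tc.mu.txnEnd == nil
	tc.mu.Unlock()
	if !done {
		return errors.New("heartbeat still running")
	}
	return nil
})
```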
- conflictTxn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + conflictTxn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) if _, pErr := conflictTxn.Get(context.TODO(), key); pErr != nil { t.Fatal(pErr) } @@ -563,7 +528,7 @@ func TestTxnCoordSenderEndTxn(t *testing.T) { // The transaction was pushed at least to conflictTxn's timestamp (but // it could have been pushed more - the push takes a timestamp off the // HLC). - pusheeTxn, pErr := getTxn(sender, txn.Proto()) + pusheeTxn, pErr := getTxn(s.DB, txn.Proto()) if pErr != nil { t.Fatal(pErr) } @@ -621,7 +586,7 @@ func TestTxnCoordSenderEndTxn(t *testing.T) { } } } - verifyCleanup(key, sender, s.Eng, t) + verifyCleanup(key, s.Eng, t, txn.Sender().(*TxnCoordSender)) } } @@ -629,12 +594,14 @@ func TestTxnCoordSenderEndTxn(t *testing.T) { // the transaction is, even on error. func TestTxnCoordSenderAddIntentOnError(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() // Create a transaction with intent at "x". key := roachpb.Key("x") - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) + tc := txn.Sender().(*TxnCoordSender) + defer teardownHeartbeat(tc) // Write so that the coordinator begins tracking this txn. if err := txn.Put(context.TODO(), "x", "y"); err != nil { t.Fatal(err) @@ -643,12 +610,11 @@ func TestTxnCoordSenderAddIntentOnError(t *testing.T) { if !ok { t.Fatal(err) } - sender.txnMu.Lock() - txnID := txn.Proto().ID - intentSpans, _ := roachpb.MergeSpans(sender.txnMu.txns[txnID].keys) + tc.mu.Lock() + intentSpans, _ := roachpb.MergeSpans(tc.mu.meta.Intents) expSpans := []roachpb.Span{{Key: key, EndKey: []byte("")}} equal := !reflect.DeepEqual(intentSpans, expSpans) - sender.txnMu.Unlock() + tc.mu.Unlock() if err := txn.Rollback(context.TODO()); err != nil { t.Fatal(err) } @@ -683,18 +649,18 @@ func assertTransactionAbortedError(t *testing.T, e error) { // TransactionAbortedError, the coordinator cleans up the transaction. func TestTxnCoordSenderCleanupOnAborted(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() // Create a transaction with intent at "a". key := roachpb.Key("a") - txn1 := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn1 := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) if err := txn1.Put(context.TODO(), key, []byte("value")); err != nil { t.Fatal(err) } // Push the transaction (by writing key "a" with higher priority) to abort it. 
- txn2 := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn2 := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) if err := txn2.SetUserPriority(roachpb.MaxUserPriority); err != nil { t.Fatal(err) } @@ -709,18 +675,19 @@ func TestTxnCoordSenderCleanupOnAborted(t *testing.T) { if err := txn2.CommitOrCleanup(context.TODO()); err != nil { t.Fatal(err) } - verifyCleanup(key, sender, s.Eng, t) + verifyCleanup(key, s.Eng, t, txn1.Sender().(*TxnCoordSender), txn2.Sender().(*TxnCoordSender)) } func TestTxnCoordSenderCancel(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() ctx, cancel := context.WithCancel(context.Background()) - origSender := sender.wrapped - sender.wrapped = client.SenderFunc( + tc := s.DB.GetSender().(*TxnCoordSender) + origSender := tc.TxnCoordSenderFactory.wrapped + tc.TxnCoordSenderFactory.wrapped = client.SenderFunc( func(ctx context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if _, hasET := args.GetArg(roachpb.EndTransaction); hasET { // Cancel the transaction while also sending it along. This tickled a @@ -731,7 +698,7 @@ func TestTxnCoordSenderCancel(t *testing.T) { }) // Create a transaction with bunch of intents. - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) batch := txn.NewBatch() for i := 0; i < 100; i++ { key := roachpb.Key(fmt.Sprintf("%d", i)) @@ -756,13 +723,14 @@ func TestTxnCoordSenderCancel(t *testing.T) { // transactions and intents after the lastUpdateNanos exceeds the timeout. func TestTxnCoordSenderGCTimeout(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() // Set heartbeat interval to 1ms for testing. - sender.heartbeatInterval = 1 * time.Millisecond + tc := s.DB.GetSender().(*TxnCoordSender) + tc.TxnCoordSenderFactory.heartbeatInterval = 1 * time.Millisecond - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) key := roachpb.Key("a") if err := txn.Put(context.TODO(), key, []byte("value")); err != nil { t.Fatal(err) @@ -770,61 +738,64 @@ func TestTxnCoordSenderGCTimeout(t *testing.T) { // Now, advance clock past the default client timeout. // Locking the TxnCoordSender to prevent a data race. - sender.txnMu.Lock() + tc.mu.Lock() s.Manual.Increment(defaultClientTimeout.Nanoseconds() + 1) - sender.txnMu.Unlock() - - txnID := txn.Proto().ID + tc.mu.Unlock() testutils.SucceedsSoon(t, func() error { // Locking the TxnCoordSender to prevent a data race. - sender.txnMu.Lock() - _, ok := sender.txnMu.txns[txnID] - sender.txnMu.Unlock() - if ok { + tc.mu.Lock() + done := tc.mu.txnEnd == nil + tc.mu.Unlock() + if !done { return errors.Errorf("expected garbage collection") } return nil }) - verifyCleanup(key, sender, s.Eng, t) + verifyCleanup(key, s.Eng, t, tc) } // TestTxnCoordSenderGCWithCancel verifies that the coordinator cleans up extant // transactions and intents after transaction context is canceled. func TestTxnCoordSenderGCWithCancel(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() - // Set heartbeat interval to 1ms for testing. 
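The cancel test above shows the new interception point: with the sender map gone, tests hook the factory's `wrapped` sender rather than a per-transaction one. In outline, a hedged sketch (`tc` is a `*TxnCoordSender` obtained as elsewhere in this diff):

```go
orig := tc.TxnCoordSenderFactory.wrapped
tc.TxnCoordSenderFactory.wrapped = client.SenderFunc(
	func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		if _, hasET := ba.GetArg(roachpb.EndTransaction); hasET {
			// Observe or perturb the commit batch here.
		}
		return orig.Send(ctx, ba)
	})
```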
- sender.heartbeatInterval = 1 * time.Millisecond - ctx, cancel := context.WithCancel(context.Background()) - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) + tc := txn.Sender().(*TxnCoordSender) + // Set heartbeat interval to 1ms for testing. + tc.TxnCoordSenderFactory.heartbeatInterval = 1 * time.Millisecond key := roachpb.Key("a") if pErr := txn.Put(ctx, key, []byte("value")); pErr != nil { t.Fatal(pErr) } + // Wait for heartbeat to kick in after Put. + testutils.SucceedsSoon(t, func() error { + tc.mu.Lock() + defer tc.mu.Unlock() + if tc.mu.txnEnd != nil { + return nil + } + return errors.Errorf("expected heartbeat to start") + }) // Now, advance clock past the default client timeout. // Locking the TxnCoordSender to prevent a data race. - sender.txnMu.Lock() + tc.mu.Lock() s.Manual.Increment(defaultClientTimeout.Nanoseconds() + 1) - sender.txnMu.Unlock() + tc.mu.Unlock() - txnID := txn.Proto().ID - - // Verify that the transaction is alive despite the timeout having been - // exceeded. + // Verify that the transaction is alive despite the timeout having + // been exceeded. errStillActive := errors.New("transaction is still active") - // TODO(dan): Figure out how to run the heartbeat manually instead of this. if err := retry.ForDuration(1*time.Second, func() error { - // Locking the TxnCoordSender to prevent a data race. - sender.txnMu.Lock() - _, ok := sender.txnMu.txns[txnID] - sender.txnMu.Unlock() - if !ok { + tc.mu.Lock() + done := tc.mu.txnEnd == nil + tc.mu.Unlock() + if done { return nil } meta := &enginepb.MVCCMetadata{} @@ -840,20 +811,9 @@ func TestTxnCoordSenderGCWithCancel(t *testing.T) { t.Fatalf("expected transaction to be active, got: %v", err) } - // After the context is canceled, the transaction should be cleaned up. + // After the context is canceled, the heartbeat should stop. cancel() - testutils.SucceedsSoon(t, func() error { - // Locking the TxnCoordSender to prevent a data race. - sender.txnMu.Lock() - _, ok := sender.txnMu.txns[txnID] - sender.txnMu.Unlock() - if ok { - return errors.Errorf("expected garbage collection") - } - return nil - }) - - verifyCleanup(key, sender, s.Eng, t) + verifyCleanup(key, s.Eng, t, tc) } // TestTxnCoordSenderGCWithAmbiguousResultErr verifies that the coordinator @@ -876,11 +836,13 @@ func TestTxnCoordSenderGCWithAmbiguousResultErr(t *testing.T) { }, } - s, sender := createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), knobs) + s := createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), knobs) defer s.Stop() ctx, cancel := context.WithCancel(context.Background()) - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) + tc := txn.Sender().(*TxnCoordSender) + defer teardownHeartbeat(tc) if !errOnFirst { otherKey := roachpb.Key("other") if err := txn.Put(ctx, otherKey, []byte("value")); err != nil { @@ -890,22 +852,21 @@ func TestTxnCoordSenderGCWithAmbiguousResultErr(t *testing.T) { if err := txn.Put(ctx, key, []byte("value")); !testutils.IsError(err, "result is ambiguous") { t.Fatalf("expected error %v, found %v", are, err) } - txnID := txn.Proto().ID // After the context is canceled, the transaction should be cleaned up. cancel() testutils.SucceedsSoon(t, func() error { // Locking the TxnCoordSender to prevent a data race. 
- sender.txnMu.Lock() - _, ok := sender.txnMu.txns[txnID] - sender.txnMu.Unlock() - if ok { + tc.mu.Lock() + done := tc.mu.txnEnd == nil + tc.mu.Unlock() + if !done { return errors.Errorf("expected garbage collection") } return nil }) - verifyCleanup(key, sender, s.Eng, t) + verifyCleanup(key, s.Eng, t, tc) }) } @@ -1020,16 +981,16 @@ func TestTxnCoordSenderTxnUpdatedOnError(t *testing.T) { return reply, pErr } ambient := log.AmbientContext{Tracer: tracing.NewTracer()} - ts := NewTxnCoordSender( + tsf := NewTxnCoordSenderFactory( ambient, cluster.MakeTestingClusterSettings(), senderFn, clock, - false, + false, /* linearizable */ stopper, MakeTxnMetrics(metric.TestSampleInterval), ) - db := client.NewDB(ts, clock) + db := client.NewDB(tsf, clock) key := roachpb.Key("test-key") origTxnProto := roachpb.MakeTransaction( "test txn", @@ -1048,11 +1009,10 @@ func TestTxnCoordSenderTxnUpdatedOnError(t *testing.T) { // for assigning exact priorities doesn't work properly when faced with // updates. origTxnProto.Priority = 1 - txn := client.NewTxnWithProto(db, 0 /* gatewayNodeID */, origTxnProto) + txn := client.NewTxnWithProto(db, 0 /* gatewayNodeID */, client.RootTxn, origTxnProto) txn.InternalSetPriority(1) - _, err := txn.Get(context.TODO(), key) - teardownHeartbeats(ts) + err := txn.Put(context.TODO(), key, []byte("value")) stopper.Stop(context.TODO()) if test.name != "nil" && err == nil { @@ -1089,22 +1049,23 @@ func TestTxnCoordSenderTxnUpdatedOnError(t *testing.T) { // TestTxnCoordIdempotentCleanup verifies that cleanupTxnLocked is idempotent. func TestTxnCoordIdempotentCleanup(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) + s := createTestDB(t) defer s.Stop() - defer teardownHeartbeats(sender) - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) + tc := txn.Sender().(*TxnCoordSender) + defer teardownHeartbeat(tc) ba := txn.NewBatch() ba.Put(roachpb.Key("a"), []byte("value")) if err := txn.Run(context.TODO(), ba); err != nil { t.Fatal(err) } - sender.txnMu.Lock() + tc.mu.Lock() // Clean up twice successively. - sender.cleanupTxnLocked(context.Background(), *txn.Proto()) - sender.cleanupTxnLocked(context.Background(), *txn.Proto()) - sender.txnMu.Unlock() + tc.cleanupTxnLocked(context.Background(), aborted) + tc.cleanupTxnLocked(context.Background(), aborted) + tc.mu.Unlock() // For good measure, try to commit (which cleans up once more if it // succeeds, which it may not if the previous cleanup has already @@ -1112,62 +1073,76 @@ func TestTxnCoordIdempotentCleanup(t *testing.T) { ba = txn.NewBatch() ba.AddRawRequest(&roachpb.EndTransactionRequest{}) err := txn.Run(context.TODO(), ba) - if _, ok := err.(*roachpb.UntrackedTxnError); err != nil && !ok { - t.Fatal(err) - } + assertTransactionAbortedError(t, err) } -// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to -// enforce that only one coordinator can be used for transactional writes. +// TestTxnMultipleCoord checks that multiple txn coordinators can be +// used by a single transaction, and their state can be combined. 
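In outline, the flow the rewritten test exercises, using only the names introduced by this diff (a sketch, not the exact test body):

```go
// Root coordinator: owns the heartbeat loop and is the only
// coordinator allowed to send EndTransaction(Commit: true).
root := client.NewTxn(db, 0 /* gatewayNodeID */, client.RootTxn)

// Leaf coordinator: initialized from the root's proto; it never
// starts a heartbeat and refuses to commit.
leaf := client.NewTxnWithProto(db, 0 /* gatewayNodeID */, client.LeafTxn, *root.Proto())

// ... the leaf issues reads/writes, accumulating intents ...

// Fold the leaf's TxnCoordMeta (txn proto, intents, command count)
// back into the root before committing.
root.AugmentTxnCoordMeta(leaf.GetTxnCoordMeta())
```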
 func TestTxnMultipleCoord(t *testing.T) {
 	defer leaktest.AfterTest(t)()
-	s, sender := createTestDB(t)
+	s := createTestDB(t)
 	defer s.Stop()
 
-	testCases := []struct {
-		args    roachpb.Request
-		writing bool
-		ok      bool
-	}{
-		{roachpb.NewGet(roachpb.Key("a")), true /* writing */, false /* not ok */},
-		{roachpb.NewGet(roachpb.Key("a")), false /* not writing */, true /* ok */},
-		// transactional write before begin
-		{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), false /* not writing */, true /* ok */},
-		// must have switched coordinators
-		{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), true /* writing */, false /* not ok */},
+	ctx := context.Background()
+	txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn)
+	tc := txn.Sender().(*TxnCoordSender)
+	defer teardownHeartbeat(tc)
+
+	// Start the transaction.
+	key := roachpb.Key("a")
+	if err := txn.Put(ctx, key, []byte("value")); err != nil {
+		t.Fatal(err)
 	}
 
-	for i, tc := range testCases {
-		txn := roachpb.MakeTransaction("test", roachpb.Key("a"), 1, enginepb.SERIALIZABLE,
-			s.Clock.Now(), s.Clock.MaxOffset().Nanoseconds())
-		txn.Writing = tc.writing
-		reply, pErr := client.SendWrappedWith(context.Background(), sender, roachpb.Header{
-			Txn: &txn,
-		}, tc.args)
-		if pErr == nil != tc.ok {
-			t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
-				i, tc.args, tc.writing, tc.ok, pErr)
-		}
-		if pErr != nil {
-			continue
-		}
+	// Now create a second, leaf coordinator.
+	txn2 := client.NewTxnWithProto(s.DB, 0 /* gatewayNodeID */, client.LeafTxn, *txn.Proto())
+	tc2 := txn2.Sender().(*TxnCoordSender)
+	defer teardownHeartbeat(tc2)
 
-		txn = *reply.Header().Txn
-		if tc.writing != txn.Writing {
-			t.Errorf("%d: unexpected writing state: %s", i, txn)
-		}
-		if !tc.writing {
-			continue
-		}
-		// Abort for clean shutdown.
-		if _, pErr := client.SendWrappedWith(context.Background(), sender, roachpb.Header{
-			Txn: &txn,
-		}, &roachpb.EndTransactionRequest{
-			Commit: false,
-		}); pErr != nil {
-			t.Fatal(pErr)
-		}
+	// Start the second transaction.
+	key2 := roachpb.Key("b")
+	if err := txn2.Put(ctx, key2, []byte("value2")); err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify heartbeat started on root txn.
+	tc.mu.Lock()
+	if tc.mu.txnEnd == nil {
+		t.Fatalf("expected heartbeat on root coordinator")
+	}
+	tc.mu.Unlock()
+	// Verify no heartbeat started on leaf txn.
+	tc2.mu.Lock()
+	if tc2.mu.txnEnd != nil {
+		t.Fatalf("unexpected heartbeat on leaf coordinator")
+	}
+	tc2.mu.Unlock()
+
+	// Verify it's an error to commit on the leaf txn node.
+	ba := txn2.NewBatch()
+	ba.AddRawRequest(&roachpb.EndTransactionRequest{Commit: true})
+	if err := txn2.Run(context.TODO(), ba); !testutils.IsError(err, "cannot commit on a leaf transaction coordinator") {
+		t.Fatalf("expected cannot commit on leaf coordinator error; got %v", err)
+	}
+
+	// Augment txn with txn2's meta & commit.
+	txn.AugmentTxnCoordMeta(txn2.GetTxnCoordMeta())
+	// Verify presence of both intents.
+ tc.mu.Lock() + if a, e := tc.mu.meta.Intents, []roachpb.Span{{Key: key}, {Key: key2}}; !reflect.DeepEqual(a, e) { + t.Fatalf("expected intents %+v; got %+v", e, a) } + if a, e := tc.mu.intentsSize, int64(len(key)+len(key2)); a != e { + t.Fatalf("expected intentsSize %d; got %d", e, a) + } + tc.mu.Unlock() + ba = txn.NewBatch() + ba.AddRawRequest(&roachpb.EndTransactionRequest{Commit: true}) + if err := txn.Run(context.TODO(), ba); err != nil { + t.Fatal(err) + } + + verifyCleanup(key, s.Eng, t, tc, tc2) } // TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely @@ -1187,10 +1162,11 @@ func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) { return br, nil } ambient := log.AmbientContext{Tracer: tracing.NewTracer()} - ts := NewTxnCoordSender( + factory := NewTxnCoordSenderFactory( ambient, cluster.MakeTestingClusterSettings(), senderFn, clock, false, stopper, MakeTxnMetrics(metric.TestSampleInterval), ) + tc := factory.New(client.RootTxn) // Stop the stopper manually, prior to trying the transaction. This has the // effect of returning a NodeUnavailableError for any attempts at launching @@ -1204,7 +1180,7 @@ func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) { ba.Add(&roachpb.EndTransactionRequest{}) txn := roachpb.MakeTransaction("test", key, 0, 0, clock.Now(), 0) ba.Txn = &txn - _, pErr := ts.Send(context.Background(), ba) + _, pErr := tc.Send(context.Background(), ba) if pErr != nil { t.Fatal(pErr) } @@ -1243,7 +1219,7 @@ func TestTxnCoordSenderErrorWithIntent(t *testing.T) { return nil, pErr } ambient := log.AmbientContext{Tracer: tracing.NewTracer()} - ts := NewTxnCoordSender( + factory := NewTxnCoordSenderFactory( ambient, cluster.MakeTestingClusterSettings(), senderFn, @@ -1252,6 +1228,8 @@ func TestTxnCoordSenderErrorWithIntent(t *testing.T) { stopper, MakeTxnMetrics(metric.TestSampleInterval), ) + tc := factory.New(client.RootTxn) + defer teardownHeartbeat(tc.(*TxnCoordSender)) var ba roachpb.BatchRequest key := roachpb.Key("test") @@ -1260,43 +1238,14 @@ func TestTxnCoordSenderErrorWithIntent(t *testing.T) { ba.Add(&roachpb.EndTransactionRequest{}) txn := roachpb.MakeTransaction("test", key, 0, 0, clock.Now(), 0) ba.Txn = &txn - _, pErr := ts.Send(context.Background(), ba) + _, pErr := tc.Send(context.Background(), ba) if !testutils.IsPError(pErr, test.errMsg) { t.Errorf("%d: error did not match %s: %v", i, test.errMsg, pErr) } - - defer teardownHeartbeats(ts) - ts.txnMu.Lock() - defer ts.txnMu.Unlock() - if len(ts.txnMu.txns) != 1 { - t.Errorf("%d: expected transaction to be tracked", i) - } }() } } -// TestTxnCoordSenderReleaseTxnMeta verifies that TxnCoordSender releases the -// txnMetadata after the txn has committed successfully. -func TestTxnCoordSenderReleaseTxnMeta(t *testing.T) { - defer leaktest.AfterTest(t)() - s, sender := createTestDB(t) - defer s.Stop() - defer teardownHeartbeats(sender) - - txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */) - ba := txn.NewBatch() - ba.Put(roachpb.Key("a"), []byte("value")) - ba.Put(roachpb.Key("b"), []byte("value")) - if err := txn.CommitInBatch(context.TODO(), ba); err != nil { - t.Fatal(err) - } - - txnID := txn.Proto().ID - if _, ok := sender.txnMu.txns[txnID]; ok { - t.Fatal("expected TxnCoordSender has released the txn") - } -} - // TestTxnCoordSenderNoDuplicateIntents verifies that TxnCoordSender does not // generate duplicate intents and that it merges intents for overlapping ranges. 
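Like the other rewritten unit tests, the one below builds a factory rather than a bare sender. The canonical setup, condensed from the call sites in this diff (`senderFn`, `clock`, and `stopper` are whatever the individual test defines):

```go
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
factory := NewTxnCoordSenderFactory(
	ambient,
	cluster.MakeTestingClusterSettings(),
	senderFn,
	clock,
	false, /* linearizable */
	stopper,
	MakeTxnMetrics(metric.TestSampleInterval),
)
db := client.NewDB(factory, clock)    // DBs now take the factory
tc := factory.New(client.RootTxn)     // or mint a coordinator directly
_, _ = db, tc
```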
func TestTxnCoordSenderNoDuplicateIntents(t *testing.T) { @@ -1322,7 +1271,7 @@ func TestTxnCoordSenderNoDuplicateIntents(t *testing.T) { return br, nil } ambient := log.AmbientContext{Tracer: tracing.NewTracer()} - ts := NewTxnCoordSender( + factory := NewTxnCoordSenderFactory( ambient, cluster.MakeTestingClusterSettings(), senderFn, @@ -1331,12 +1280,10 @@ func TestTxnCoordSenderNoDuplicateIntents(t *testing.T) { stopper, MakeTxnMetrics(metric.TestSampleInterval), ) - defer stopper.Stop(context.TODO()) - defer teardownHeartbeats(ts) - db := client.NewDB(ts, clock) - txn := client.NewTxn(db, 0 /* gatewayNodeID */) + db := client.NewDB(factory, clock) + txn := client.NewTxn(db, 0 /* gatewayNodeID */, client.RootTxn) // Write to a, b, u-w before the final batch. @@ -1378,12 +1325,10 @@ func TestTxnCoordSenderNoDuplicateIntents(t *testing.T) { // the TxnCoordSender's asynchronous updating of metrics after a transaction ends. func checkTxnMetrics( t *testing.T, - sender *TxnCoordSender, + metrics TxnMetrics, name string, commits, commits1PC, abandons, aborts, restarts int64, ) { - metrics := sender.metrics - testutils.SucceedsSoon(t, func() error { testcases := []struct { name string @@ -1423,19 +1368,14 @@ func checkTxnMetrics( }) } -// setupMetricsTest returns a TxnCoordSender and ManualClock pointing to a newly created -// LocalTestCluster. Also returns a cleanup function to be executed at the end of the -// test. -func setupMetricsTest(t *testing.T) (*localtestcluster.LocalTestCluster, *TxnCoordSender, func()) { - s, testSender := createTestDB(t) - st := s.Cfg.Settings - txnMetrics := MakeTxnMetrics(metric.TestSampleInterval) - ambient := log.AmbientContext{Tracer: st.Tracer} - sender := NewTxnCoordSender(ambient, st, - testSender.wrapped, s.Clock, false, s.Stopper, txnMetrics) - - return s, sender, func() { - teardownHeartbeats(sender) +// setupMetricsTest sets the txn coord sender factory's metrics to +// have a faster sample interval and returns a cleanup function to be +// executed by callers. +func setupMetricsTest(t *testing.T) (*localtestcluster.LocalTestCluster, TxnMetrics, func()) { + s := createTestDB(t) + metrics := MakeTxnMetrics(metric.TestSampleInterval) + s.DB.GetSender().(*TxnCoordSender).TxnCoordSenderFactory.metrics = metrics + return s, metrics, func() { s.Stop() } } @@ -1445,13 +1385,12 @@ func setupMetricsTest(t *testing.T) (*localtestcluster.LocalTestCluster, *TxnCoo // function as other tests do. func TestTxnCommit(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender, cleanupFn := setupMetricsTest(t) + s, metrics, cleanupFn := setupMetricsTest(t) defer cleanupFn() value := []byte("value") - db := client.NewDB(sender, s.Clock) // Test normal commit. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { key := []byte("key-commit") if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil { @@ -1466,19 +1405,17 @@ func TestTxnCommit(t *testing.T) { }); err != nil { t.Fatal(err) } - teardownHeartbeats(sender) - checkTxnMetrics(t, sender, "commit txn", 1, 0 /* not 1PC */, 0, 0, 0) + checkTxnMetrics(t, metrics, "commit txn", 1, 0 /* not 1PC */, 0, 0, 0) } // TestTxnOnePhaseCommit verifies that 1PC metric tracking works. 
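Metric bookkeeping moved with the rest of the shared state: `setupMetricsTest` (above) swaps the factory's `TxnMetrics` once, and every coordinator minted afterwards reports into the same struct. Roughly:

```go
metrics := MakeTxnMetrics(metric.TestSampleInterval)
s.DB.GetSender().(*TxnCoordSender).TxnCoordSenderFactory.metrics = metrics

// ... run transactions through s.DB ...

// Assertions then read the shared struct.
checkTxnMetrics(t, metrics, "commit txn", 1, 0 /* not 1PC */, 0, 0, 0)
```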
func TestTxnOnePhaseCommit(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender, cleanupFn := setupMetricsTest(t) + s, metrics, cleanupFn := setupMetricsTest(t) defer cleanupFn() value := []byte("value") - db := client.NewDB(sender, s.Clock) - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { key := []byte("key-commit") b := txn.NewBatch() b.Put(key, value) @@ -1486,33 +1423,48 @@ func TestTxnOnePhaseCommit(t *testing.T) { }); err != nil { t.Fatal(err) } - teardownHeartbeats(sender) - checkTxnMetrics(t, sender, "commit 1PC txn", 1, 1 /* 1PC */, 0, 0, 0) + checkTxnMetrics(t, metrics, "commit 1PC txn", 1, 1 /* 1PC */, 0, 0, 0) +} + +// createNonCancelableDB returns a client DB and a sender. The sender +// will be used for every transaction sent through the DB, so use with +// care. The point is that every invocation of TxnCoordSender.Send will +// be supplied a context with Done()==nil. Note that the returned +// database will use a factory with ridiculously short client timeout +// and heartbeat interval settings to make tests fast. +func createNonCancelableDB(db *client.DB) (*client.DB, *TxnCoordSender) { + // Create the single txn coord for this test. + tc := db.GetSender().(*TxnCoordSender) + tc.TxnCoordSenderFactory.heartbeatInterval = 2 * time.Millisecond + tc.TxnCoordSenderFactory.clientTimeout = 1 * time.Millisecond + + // client.Txn supplies a non-cancelable context, which makes the + // transaction coordinator ignore timeout-based abandonment and use the + // context's lifetime instead. In this test, we are testing timeout-based + // abandonment, so we need to supply a non-cancelable context. + var factory client.TxnSenderFactoryFunc = func(_ client.TxnType) client.TxnSender { + return client.TxnSenderFunc(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + return tc.Send(context.Background(), ba) + }) + } + return client.NewDB(factory, tc.TxnCoordSenderFactory.clock), tc } func TestTxnAbandonCount(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender, cleanupFn := setupMetricsTest(t) + s, metrics, cleanupFn := setupMetricsTest(t) defer cleanupFn() value := []byte("value") manual := s.Manual - // client.Txn supplies a non-cancellable context, which makes the - // transaction coordinator ignore timeout-based abandonment and use the - // context's lifetime instead. In this test, we are testing timeout-based - // abandonment, so we need to supply a non-cancellable context. - var senderFn client.SenderFunc = func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { - return sender.Send(context.TODO(), ba) - } - - db := client.NewDB(senderFn, s.Clock) - - // Test abandoned transaction by making the client timeout ridiculously short. We also set - // the sender to heartbeat very frequently, because the heartbeat detects and tears down - // abandoned transactions. 
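The reason `createNonCancelableDB` (above) works is the coordinator's choice of abandonment strategy: a context whose `Done()` channel is nil can never be canceled, so the heartbeat loop falls back to the `clientTimeout` clock. For reference, a trivial sketch of the distinction:

```go
// context.Background() is the canonical non-cancelable context.
ctx := context.Background()
if ctx.Done() == nil {
	// No cancellation signal will ever arrive, so timeout-based
	// abandonment (clientTimeout) is the only way the heartbeat
	// can declare the client gone.
}
```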
- sender.heartbeatInterval = 2 * time.Millisecond - sender.clientTimeout = 1 * time.Millisecond - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + var count int + doneErr := errors.New("retry on abandoned successful; exiting") + db, tc := createNonCancelableDB(s.DB) + if err := db.Txn(context.Background(), func(ctx context.Context, txn *client.Txn) error { + count++ + if count == 2 { + return doneErr + } key := []byte("key-abandon") if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil { @@ -1523,12 +1475,12 @@ func TestTxnAbandonCount(t *testing.T) { return err } - manual.Increment(int64(sender.clientTimeout + sender.heartbeatInterval*2)) + manual.Increment(int64(tc.TxnCoordSenderFactory.clientTimeout + tc.TxnCoordSenderFactory.heartbeatInterval*2)) - checkTxnMetrics(t, sender, "abandon txn", 0, 0, 1, 0, 0) + checkTxnMetrics(t, metrics, "abandon txn", 0, 0, 1, 0, 0) return nil - }); !testutils.IsError(err, "writing transaction timed out") { + }); err != doneErr { t.Fatalf("unexpected error: %v", err) } } @@ -1538,29 +1490,20 @@ func TestTxnAbandonCount(t *testing.T) { // which should fail. func TestTxnReadAfterAbandon(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender, cleanupFn := setupMetricsTest(t) + s, metrics, cleanupFn := setupMetricsTest(t) manual := s.Manual defer cleanupFn() value := []byte("value") - // client.Txn supplies a non-cancellable context, which makes the - // transaction coordinator ignore timeout-based abandonment and use the - // context's lifetime instead. In this test, we are testing timeout-based - // abandonment, so we need to supply a non-cancellable context. - var senderFn client.SenderFunc = func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { - return sender.Send(context.TODO(), ba) - } - - db := client.NewDB(senderFn, s.Clock) - - // Test abandoned transaction by making the client timeout ridiculously short. We also set - // the sender to heartbeat very frequently, because the heartbeat detects and tears down - // abandoned transactions. 
- sender.heartbeatInterval = 2 * time.Millisecond - sender.clientTimeout = 1 * time.Millisecond - - err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + var count int + doneErr := errors.New("retry on abandoned successful; exiting") + db, tc := createNonCancelableDB(s.DB) + err := db.Txn(context.Background(), func(ctx context.Context, txn *client.Txn) error { + count++ + if count == 2 { + return doneErr + } key := []byte("key-abandon") if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil { @@ -1571,12 +1514,12 @@ func TestTxnReadAfterAbandon(t *testing.T) { t.Fatal(err) } - manual.Increment(int64(sender.clientTimeout + sender.heartbeatInterval*2)) + manual.Increment(int64(tc.TxnCoordSenderFactory.clientTimeout + tc.TxnCoordSenderFactory.heartbeatInterval*2)) - checkTxnMetrics(t, sender, "abandon txn", 0, 0, 1, 0, 0) + checkTxnMetrics(t, metrics, "abandon txn", 0, 0, 1, 0, 0) _, err := txn.Get(ctx, key) - if !testutils.IsError(err, "writing transaction timed out") { + if !testutils.IsError(err, "txn aborted") { t.Fatalf("unexpected error from Get on abandoned txn: %v", err) } return err // appease compiler @@ -1589,15 +1532,14 @@ func TestTxnReadAfterAbandon(t *testing.T) { func TestTxnAbortCount(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender, cleanupFn := setupMetricsTest(t) + s, metrics, cleanupFn := setupMetricsTest(t) defer cleanupFn() value := []byte("value") - db := client.NewDB(sender, s.Clock) intentionalErrText := "intentional error to cause abort" // Test aborted transaction. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { key := []byte("key-abort") if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil { @@ -1612,28 +1554,26 @@ func TestTxnAbortCount(t *testing.T) { }); !testutils.IsError(err, intentionalErrText) { t.Fatalf("unexpected error: %v", err) } - teardownHeartbeats(sender) - checkTxnMetrics(t, sender, "abort txn", 0, 0, 0, 1, 0) + checkTxnMetrics(t, metrics, "abort txn", 0, 0, 0, 1, 0) } func TestTxnRestartCount(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender, cleanupFn := setupMetricsTest(t) + s, metrics, cleanupFn := setupMetricsTest(t) defer cleanupFn() key := []byte("key-restart") value := []byte("value") - db := client.NewDB(sender, s.Clock) // Start a transaction and do a GET. This forces a timestamp to be chosen for the transaction. - txn := client.NewTxn(db, 0 /* gatewayNodeID */) + txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn) if _, err := txn.Get(context.TODO(), key); err != nil { t.Fatal(err) } // Outside of the transaction, read the same key as was read within the transaction. This // means that future attempts to write will increase the timestamp. 
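The setup above is the standard restart recipe: pin the transaction's timestamp with a read, bump the timestamp cache with an outside read, then write. Condensed (a sketch mirroring the structure of this test; `ctx`, `key`, and `value` are assumed):

```go
txn := client.NewTxn(s.DB, 0 /* gatewayNodeID */, client.RootTxn)
_, _ = txn.Get(ctx, key)  // fixes the txn's original timestamp
_, _ = s.DB.Get(ctx, key) // independent read at a later timestamp
_ = txn.Put(ctx, key, value)
// A SERIALIZABLE commit must now fail with a retry error, since the
// write was pushed above the original timestamp.
err := txn.CommitOrCleanup(ctx)
assertTransactionRetryError(t, err)
```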
- if _, err := db.Get(context.TODO(), key); err != nil { + if _, err := s.DB.Get(context.TODO(), key); err != nil { t.Fatal(err) } @@ -1649,23 +1589,20 @@ func TestTxnRestartCount(t *testing.T) { err := txn.CommitOrCleanup(context.TODO()) assertTransactionRetryError(t, err) - teardownHeartbeats(sender) - checkTxnMetrics(t, sender, "restart txn", 0, 0, 0, 1, 1) + checkTxnMetrics(t, metrics, "restart txn", 0, 0, 0, 1, 1) } func TestTxnDurations(t *testing.T) { defer leaktest.AfterTest(t)() - s, sender, cleanupFn := setupMetricsTest(t) + s, metrics, cleanupFn := setupMetricsTest(t) manual := s.Manual defer cleanupFn() - - db := client.NewDB(sender, s.Clock) const puts = 10 const incr int64 = 1000 for i := 0; i < puts; i++ { key := roachpb.Key(fmt.Sprintf("key-txn-durations-%d", i)) - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil { return err } @@ -1679,11 +1616,9 @@ func TestTxnDurations(t *testing.T) { } } - teardownHeartbeats(sender) - checkTxnMetrics(t, sender, "txn durations", puts, 0, 0, 0, 0) - - hist := sender.metrics.Durations + checkTxnMetrics(t, metrics, "txn durations", puts, 0, 0, 0, 0) + hist := metrics.Durations // The clock is a bit odd in these tests, so I can't test the mean without // introducing spurious errors or being overly lax. // @@ -1715,7 +1650,7 @@ func TestTxnDurations(t *testing.T) { // In TxnCoordSender: // func heartbeatLoop(ctx context.Context, ...) { // if !HasContextLifetime(ctx) && txnMeta.hasClientAbandonedCoord ... { -// tc.tryAbort(txnID) +// tc.tryAsyncAbort() // return // } // } @@ -1733,9 +1668,9 @@ func TestAbortTransactionOnCommitErrors(t *testing.T) { clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) testCases := []struct { - err error - errFn func(roachpb.Transaction) *roachpb.Error - abort bool + err error + errFn func(roachpb.Transaction) *roachpb.Error + asyncAbort bool }{ { errFn: func(txn roachpb.Transaction) *roachpb.Error { @@ -1747,18 +1682,20 @@ func TestAbortTransactionOnCommitErrors(t *testing.T) { roachpb.NewReadWithinUncertaintyIntervalError(hlc.Timestamp{}, hlc.Timestamp{}, nil), &txn) }, - abort: true}, - {err: &roachpb.TransactionAbortedError{}, abort: false}, - {err: &roachpb.TransactionPushError{}, abort: true}, - {err: &roachpb.TransactionRetryError{}, abort: true}, - {err: &roachpb.RangeNotFoundError{}, abort: true}, - {err: &roachpb.RangeKeyMismatchError{}, abort: true}, - {err: &roachpb.TransactionStatusError{}, abort: true}, + asyncAbort: false}, + {err: &roachpb.TransactionAbortedError{}, asyncAbort: true}, + {err: &roachpb.TransactionPushError{}, asyncAbort: false}, + {err: &roachpb.TransactionRetryError{}, asyncAbort: false}, + {err: &roachpb.RangeNotFoundError{}, asyncAbort: false}, + {err: &roachpb.RangeKeyMismatchError{}, asyncAbort: false}, + {err: &roachpb.TransactionStatusError{}, asyncAbort: false}, } for _, test := range testCases { t.Run(fmt.Sprintf("%T", test.err), func(t *testing.T) { - var commit, abort bool + var commit, abort atomic.Value + commit.Store(false) + abort.Store(false) stopper := stop.NewStopper() defer stopper.Stop(context.TODO()) @@ -1784,20 +1721,20 @@ func TestAbortTransactionOnCommitErrors(t *testing.T) { } case *roachpb.EndTransactionRequest: if req.Commit { - commit = true + commit.Store(true) if test.errFn != nil { return nil, test.errFn(*ba.Txn) } return nil, roachpb.NewErrorWithTxn(test.err, ba.Txn) } - 
abort = true + abort.Store(true) default: t.Fatalf("unexpected batch: %s", ba) } return br, nil } ambient := log.AmbientContext{Tracer: tracing.NewTracer()} - ts := NewTxnCoordSender( + factory := NewTxnCoordSenderFactory( ambient, cluster.MakeTestingClusterSettings(), senderFn, @@ -1806,9 +1743,9 @@ func TestAbortTransactionOnCommitErrors(t *testing.T) { stopper, MakeTxnMetrics(metric.TestSampleInterval), ) - db := client.NewDB(ts, clock) - txn := client.NewTxn(db, 0 /* gatewayNodeID */) + db := client.NewDB(factory, clock) + txn := client.NewTxn(db, 0 /* gatewayNodeID */, client.RootTxn) if pErr := txn.Put(context.Background(), "a", "b"); pErr != nil { t.Fatalf("put failed: %s", pErr) } @@ -1816,13 +1753,18 @@ func TestAbortTransactionOnCommitErrors(t *testing.T) { t.Fatalf("unexpected commit success") } - if !commit { - t.Errorf("%T: failed to find commit", test.err) + if !commit.Load().(bool) { + t.Errorf("%T: failed to find initial commit request", test.err) } - if test.abort && !abort { - t.Errorf("%T: failed to find abort", test.err) - } else if !test.abort && abort { - t.Errorf("%T: found unexpected abort", test.err) + if !test.asyncAbort && !abort.Load().(bool) { + t.Errorf("%T: failed to find expected synchronous abort", test.err) + } else { + testutils.SucceedsSoon(t, func() error { + if !abort.Load().(bool) { + return errors.Errorf("%T: failed to find expected asynchronous abort", test.err) + } + return nil + }) } }) } diff --git a/pkg/kv/txn_correctness_test.go b/pkg/kv/txn_correctness_test.go index 21c980d8864f..ea6375d533e3 100644 --- a/pkg/kv/txn_correctness_test.go +++ b/pkg/kv/txn_correctness_test.go @@ -893,7 +893,7 @@ func checkConcurrency( s := &localtestcluster.LocalTestCluster{ DontRetryPushTxnFailures: true, } - s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster) + s.Start(t, testutils.NewNodeTestBaseContext(), InitFactoryForLocalTestCluster) defer s.Stop() verifier.run(isolations, s.DB, t) } diff --git a/pkg/kv/txn_test.go b/pkg/kv/txn_test.go index c52ffbbe1212..72c4c5bc17d5 100644 --- a/pkg/kv/txn_test.go +++ b/pkg/kv/txn_test.go @@ -18,11 +18,14 @@ import ( "bytes" "context" "fmt" + "sync/atomic" "testing" "time" + "unsafe" "github.com/pkg/errors" + "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -42,7 +45,7 @@ import ( // read from inside the txn. func TestTxnDBBasics(t *testing.T) { defer leaktest.AfterTest(t)() - s, _ := createTestDB(t) + s := createTestDB(t) defer s.Stop() value := []byte("value") @@ -108,7 +111,7 @@ func BenchmarkSingleRoundtripWithLatency(b *testing.B) { b.Run(fmt.Sprintf("latency=%s", latency), func(b *testing.B) { var s localtestcluster.LocalTestCluster s.Latency = latency - s.Start(b, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster) + s.Start(b, testutils.NewNodeTestBaseContext(), InitFactoryForLocalTestCluster) defer s.Stop() defer b.StopTimer() key := roachpb.Key("key") @@ -131,7 +134,7 @@ func BenchmarkSingleRoundtripWithLatency(b *testing.B) { // the transaction, not the forwarded timestamp. 
 func TestSnapshotIsolationIncrement(t *testing.T) {
 	defer leaktest.AfterTest(t)()
-	s, _ := createTestDB(t)
+	s := createTestDB(t)
 	defer s.Stop()
 
 	var key = roachpb.Key("a")
@@ -212,7 +215,7 @@ func TestSnapshotIsolationIncrement(t *testing.T) {
 // R1(A) W2(A,"hi") W1(A,"oops!") C1 [serializable restart] R1(A) W1(A,"correct") C1
 func TestSnapshotIsolationLostUpdate(t *testing.T) {
 	defer leaktest.AfterTest(t)()
-	s, _ := createTestDB(t)
+	s := createTestDB(t)
 	defer s.Stop()
 	var key = roachpb.Key("a")
 
@@ -281,7 +284,7 @@ func TestSnapshotIsolationLostUpdate(t *testing.T) {
 // concurrent reader pushes an intent.
 func TestPriorityRatchetOnAbortOrPush(t *testing.T) {
 	defer leaktest.AfterTest(t)()
-	s, _ := createTestDB(t)
+	s := createTestDB(t)
 	defer s.Stop()
 
 	pushByReading := func(key roachpb.Key) {
@@ -365,11 +368,19 @@ func TestPriorityRatchetOnAbortOrPush(t *testing.T) {
 // by poisoning the gossip NodeID; this may break other functionality which
 // is usually not relevant in uncertainty tests.
 func disableOwnNodeCertain(tc *localtestcluster.LocalTestCluster) error {
-	distSender := tc.Sender.(*TxnCoordSender).wrapped.(*DistSender)
-	desc := distSender.getNodeDescriptor()
-	desc.NodeID = 999
-	distSender.gossip.NodeID.Reset(desc.NodeID)
-	return distSender.gossip.SetNodeDescriptor(desc)
+	ownNodeID := tc.Gossip.NodeID.Get()
+	if ownNodeID > 0 {
+		desc := &roachpb.NodeDescriptor{}
+		if err := tc.Gossip.GetInfoProto(gossip.MakeNodeIDKey(ownNodeID), desc); err == nil {
+			desc.NodeID = 999
+			tc.Gossip.NodeID.Reset(desc.NodeID)
+			sender := tc.DB.GetSender().(*TxnCoordSender)
+			ds := sender.wrapped.(*DistSender)
+			atomic.StorePointer(&ds.nodeDescriptor, unsafe.Pointer(desc))
+			return tc.Gossip.SetNodeDescriptor(desc)
+		}
+	}
+	return errors.New("unable to disable own node certain")
 }
 
 // TestUncertaintyRestart verifies that a transaction which finds a write in
@@ -384,7 +395,7 @@ func TestUncertaintyRestart(t *testing.T) {
 		Clock:     hlc.NewClock(hlc.UnixNano, maxOffset),
 		DBContext: &dbCtx,
 	}
-	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
+	s.Start(t, testutils.NewNodeTestBaseContext(), InitFactoryForLocalTestCluster)
 	defer s.Stop()
 	if err := disableOwnNodeCertain(s); err != nil {
 		t.Fatal(err)
@@ -428,10 +439,10 @@ func TestUncertaintyRestart(t *testing.T) {
 	}
 }
 
 // TestUncertaintyObservedTimestampForwarding checks that when receiving an
 // uncertainty restart on a node, the next attempt to read (at the increased
 // timestamp) is free from uncertainty. See roachpb.Transaction for details.
-func TestUncertaintyMaxTimestampForwarding(t *testing.T) {
+func TestUncertaintyObservedTimestampForwarding(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 
 	dbCtx := client.DefaultDBContext()
@@ -442,7 +453,7 @@ func TestUncertaintyMaxTimestampForwarding(t *testing.T) {
 		Clock:     hlc.NewClock(hlc.UnixNano, 50*time.Second),
 		DBContext: &dbCtx,
 	}
-	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
+	s.Start(t, testutils.NewNodeTestBaseContext(), InitFactoryForLocalTestCluster)
 	defer s.Stop()
 	if err := disableOwnNodeCertain(s); err != nil {
 		t.Fatal(err)
@@ -484,7 +495,7 @@ func TestUncertaintyMaxTimestampForwarding(t *testing.T) {
 	// The server's clock suddenly jumps ahead of keyFast's timestamp.
 	s.Manual.Increment(2*offsetNS + 1)
 
-	// Now read slowKey first. It should read at 0, catch an uncertainty error,
+	// Now read keySlow first.
It should read at 0, catch an uncertainty error, // and get keySlow's timestamp in that error, but upgrade it to the larger // node clock (which is ahead of keyFast as well). If the last part does // not happen, the read of keyFast should fail (i.e. read nothing). @@ -517,7 +528,7 @@ func TestUncertaintyMaxTimestampForwarding(t *testing.T) { // the transaction's current timestamp instead of original timestamp. func TestTxnTimestampRegression(t *testing.T) { defer leaktest.AfterTest(t)() - s, _ := createTestDB(t) + s := createTestDB(t) defer s.Stop() keyA := "a" @@ -556,7 +567,7 @@ func TestTxnTimestampRegression(t *testing.T) { // See issue #676 for full details about original bug. func TestTxnLongDelayBetweenWritesWithConcurrentRead(t *testing.T) { defer leaktest.AfterTest(t)() - s, _ := createTestDB(t) + s := createTestDB(t) defer s.Stop() keyA := roachpb.Key("a") @@ -625,7 +636,7 @@ func TestTxnLongDelayBetweenWritesWithConcurrentRead(t *testing.T) { // See issue #676 for full details about original bug. func TestTxnRepeatGetWithRangeSplit(t *testing.T) { defer leaktest.AfterTest(t)() - s, _ := createTestDB(t) + s := createTestDB(t) defer s.Stop() keyA := roachpb.Key("a") @@ -710,7 +721,7 @@ func TestTxnRepeatGetWithRangeSplit(t *testing.T) { // with the original timestamp of a restarted transaction. func TestTxnRestartedSerializableTimestampRegression(t *testing.T) { defer leaktest.AfterTest(t)() - s, _ := createTestDB(t) + s := createTestDB(t) defer s.Stop() keyA := "a" @@ -776,7 +787,7 @@ func TestTxnRestartedSerializableTimestampRegression(t *testing.T) { // from earlier epochs are cleaned up on transaction commit. func TestTxnResolveIntentsFromMultipleEpochs(t *testing.T) { defer leaktest.AfterTest(t)() - s, _ := createTestDB(t) + s := createTestDB(t) defer s.Stop() keys := []string{"a", "b", "c"} diff --git a/pkg/roachpb/api.pb.go b/pkg/roachpb/api.pb.go index 9db01a7f576a..a807c94f3bf9 100644 --- a/pkg/roachpb/api.pb.go +++ b/pkg/roachpb/api.pb.go @@ -117,6 +117,7 @@ Intent Lease AbortSpanEntry + TxnCoordMeta NotLeaseHolderError NodeUnavailableError UnsupportedRequestError diff --git a/pkg/roachpb/data.pb.go b/pkg/roachpb/data.pb.go index 744d5b9800ee..3e2ac6fb4150 100644 --- a/pkg/roachpb/data.pb.go +++ b/pkg/roachpb/data.pb.go @@ -532,6 +532,27 @@ func (m *AbortSpanEntry) String() string { return proto.CompactTextSt func (*AbortSpanEntry) ProtoMessage() {} func (*AbortSpanEntry) Descriptor() ([]byte, []int) { return fileDescriptorData, []int{13} } +// TxnCoordMeta is metadata held by a transaction coordinator. This +// message is defined here because it is used in several layers of the +// system (internal/client, sql/distsqlrun, kv). +type TxnCoordMeta struct { + // txn is a copy of the transaction record, updated with each request. + Txn Transaction `protobuf:"bytes,1,opt,name=txn" json:"txn"` + // intents stores key spans affected by this transaction through + // this coordinator. These spans allow the coordinator to set the + // list of intent spans in the EndTransactionRequest when the + // transaction is finalized. + Intents []Span `protobuf:"bytes,2,rep,name=intents" json:"intents"` + // command_count indicates how many requests have been sent through + // this transaction. Reset on retryable txn errors. 
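+	//
+	// For illustration (hypothetical values), a round-trip through the
+	// generated methods added below:
+	//
+	//   meta := TxnCoordMeta{CommandCount: 3}
+	//   buf, _ := meta.Marshal()   // field 3 encodes as tag 0x18, varint 3
+	//   var out TxnCoordMeta
+	//   _ = out.Unmarshal(buf)
+	//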
+ CommandCount int32 `protobuf:"varint,3,opt,name=command_count,json=commandCount,proto3" json:"command_count,omitempty"` +} + +func (m *TxnCoordMeta) Reset() { *m = TxnCoordMeta{} } +func (m *TxnCoordMeta) String() string { return proto.CompactTextString(m) } +func (*TxnCoordMeta) ProtoMessage() {} +func (*TxnCoordMeta) Descriptor() ([]byte, []int) { return fileDescriptorData, []int{14} } + func init() { proto.RegisterType((*Span)(nil), "cockroach.roachpb.Span") proto.RegisterType((*Value)(nil), "cockroach.roachpb.Value") @@ -547,6 +568,7 @@ func init() { proto.RegisterType((*Intent)(nil), "cockroach.roachpb.Intent") proto.RegisterType((*Lease)(nil), "cockroach.roachpb.Lease") proto.RegisterType((*AbortSpanEntry)(nil), "cockroach.roachpb.AbortSpanEntry") + proto.RegisterType((*TxnCoordMeta)(nil), "cockroach.roachpb.TxnCoordMeta") proto.RegisterEnum("cockroach.roachpb.ValueType", ValueType_name, ValueType_value) proto.RegisterEnum("cockroach.roachpb.ReplicaChangeType", ReplicaChangeType_name, ReplicaChangeType_value) proto.RegisterEnum("cockroach.roachpb.TransactionStatus", TransactionStatus_name, TransactionStatus_value) @@ -1587,6 +1609,49 @@ func (m *AbortSpanEntry) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *TxnCoordMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnCoordMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintData(dAtA, i, uint64(m.Txn.Size())) + n28, err := m.Txn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + if len(m.Intents) > 0 { + for _, msg := range m.Intents { + dAtA[i] = 0x12 + i++ + i = encodeVarintData(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.CommandCount != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintData(dAtA, i, uint64(m.CommandCount)) + } + return i, nil +} + func encodeVarintData(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2020,6 +2085,23 @@ func (m *AbortSpanEntry) Size() (n int) { return n } +func (m *TxnCoordMeta) Size() (n int) { + var l int + _ = l + l = m.Txn.Size() + n += 1 + l + sovData(uint64(l)) + if len(m.Intents) > 0 { + for _, e := range m.Intents { + l = e.Size() + n += 1 + l + sovData(uint64(l)) + } + } + if m.CommandCount != 0 { + n += 1 + sovData(uint64(m.CommandCount)) + } + return n +} + func sovData(x uint64) (n int) { for { n++ @@ -4114,6 +4196,136 @@ func (m *AbortSpanEntry) Unmarshal(dAtA []byte) error { } return nil } +func (m *TxnCoordMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnCoordMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnCoordMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthData + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Intents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthData + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Intents = append(m.Intents, Span{}) + if err := m.Intents[len(m.Intents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommandCount", wireType) + } + m.CommandCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CommandCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipData(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthData + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipData(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -4222,105 +4434,108 @@ var ( func init() { proto.RegisterFile("roachpb/data.proto", fileDescriptorData) } var fileDescriptorData = []byte{ - // 1593 bytes of a gzipped FileDescriptorProto + // 1643 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xcd, 0x8f, 0x1b, 0x49, - 0x15, 0x9f, 0x1e, 0xdb, 0x63, 0xfb, 0xd9, 0x9e, 0xf4, 0x54, 0x32, 0x1b, 0x33, 0x08, 0x3b, 0x18, - 0x84, 0xa2, 0x88, 0xf5, 0x88, 0x04, 0x84, 0x18, 0x21, 0xa4, 0xf1, 0xb8, 0x77, 0xd3, 0x1b, 0x7f, - 0x0c, 0xed, 0xf6, 0x22, 0xb2, 0x42, 0x4d, 0xbb, 0xbb, 0x62, 0xb7, 0xd2, 0xee, 0x6a, 0xaa, 0xca, + 0x15, 0x9f, 0x1e, 0xdb, 0x63, 0xfb, 0xd9, 0x9e, 0xf4, 0x54, 0x32, 0x1b, 0x33, 0x08, 0x3b, 0x78, + 0x11, 0x8a, 0x22, 0xd6, 0x23, 0x12, 0x3e, 0xc4, 0x08, 0x21, 0x8d, 0x3f, 0x76, 0xd3, 0x1b, 0x7f, + 0x0c, 0xed, 0x9e, 0x45, 0x64, 0x85, 0x9a, 0x76, 0x77, 0xc5, 0x6e, 0xc5, 0xdd, 0xd5, 0x54, 0x95, 0x93, 0x31, 0x7f, 0xc1, 0xde, 0xd8, 0x23, 0xc7, 0x48, 0x70, 0x42, 0xe2, 0xbe, 0xff, 0x01, 0xb9, 0x20, 0xed, 0x0d, 0xc4, 0xc1, 0x02, 0x73, 0x59, 0x89, 0x1b, 0xc7, 0x48, 0x48, 0xa8, 0xaa, 0xab, - 0x6d, 0xb3, 0xe3, 0x44, 0x33, 0x2c, 0x07, 0x2e, 0x76, 0xd5, 0xfb, 0xf8, 0xbd, 0x57, 0xaf, 0xde, - 0x47, 0x35, 0x20, 0x4a, 0x5c, 0x6f, 0x12, 0x8f, 0x8e, 0x7d, 0x97, 0xbb, 0xcd, 0x98, 0x12, 0x4e, - 0xd0, 0x81, 0x47, 0xbc, 0xe7, 0x92, 0xde, 0x54, 0xdc, 0xa3, 0x77, 0x52, 0xb1, 0x29, 0xe6, 0xee, - 0x5a, 0xf4, 0xe8, 0x1b, 0x8c, 0x13, 0xea, 0x8e, 0xf1, 0x31, 0x8e, 0xc6, 0x41, 0x94, 0xfe, 0x09, - 0xb9, 0x0b, 0xcf, 0x7b, 0xa4, 0x84, 0xaa, 0x33, 0x1e, 0x84, 0xc7, 0x93, 0xd0, 0x3b, 0xe6, 0xc1, - 0x14, 0x33, 0xee, 0x4e, 0x63, 0xc5, 0xb9, 0x33, 
0x26, 0x63, 0x22, 0x97, 0xc7, 0x62, 0x95, 0x50, - 0x1b, 0x3f, 0x86, 0xec, 0x20, 0x76, 0x23, 0xf4, 0x15, 0xc8, 0x3c, 0xc7, 0xf3, 0x6a, 0xe6, 0x9e, - 0x76, 0xbf, 0xdc, 0xca, 0xbf, 0x5e, 0xd4, 0x33, 0x4f, 0xf0, 0xdc, 0x12, 0x34, 0x74, 0x0f, 0xf2, - 0x38, 0xf2, 0x1d, 0xc1, 0xce, 0xfe, 0x27, 0x7b, 0x0f, 0x47, 0xfe, 0x13, 0x3c, 0x3f, 0x29, 0xff, - 0xfa, 0x65, 0x7d, 0xe7, 0xd3, 0x97, 0x75, 0xed, 0xf3, 0x97, 0x75, 0xad, 0x31, 0x85, 0xdc, 0x87, - 0x6e, 0x38, 0xc3, 0xe8, 0xab, 0x50, 0xa4, 0xee, 0x0b, 0x67, 0x34, 0xe7, 0x98, 0x55, 0x35, 0xa1, - 0x6a, 0x15, 0xa8, 0xfb, 0xa2, 0x25, 0xf6, 0xe8, 0x14, 0x8a, 0x2b, 0x0f, 0xab, 0xbb, 0xf7, 0xb4, - 0xfb, 0xa5, 0x87, 0x5f, 0x6b, 0xae, 0x83, 0x21, 0x8e, 0xd1, 0x9c, 0x84, 0x5e, 0xd3, 0x4e, 0x85, - 0x5a, 0xd9, 0x57, 0x8b, 0xfa, 0x8e, 0xb5, 0xd6, 0x3a, 0xc9, 0x4a, 0x73, 0x1f, 0x41, 0xe1, 0x09, - 0x9e, 0x27, 0x16, 0xd5, 0x29, 0xb4, 0x2d, 0xa7, 0xf8, 0x2e, 0xe4, 0x2e, 0x84, 0x8c, 0xb2, 0x55, - 0x6d, 0x5e, 0x09, 0x7c, 0x53, 0x62, 0x28, 0x33, 0x89, 0x70, 0xe3, 0x4f, 0x1a, 0xc0, 0x80, 0x13, - 0x8a, 0x4d, 0x1f, 0x47, 0x1c, 0x8d, 0x01, 0xbc, 0x70, 0xc6, 0x38, 0xa6, 0x4e, 0xe0, 0x2b, 0x33, - 0x8f, 0x85, 0xfc, 0x5f, 0x16, 0xf5, 0x47, 0xe3, 0x80, 0x4f, 0x66, 0xa3, 0xa6, 0x47, 0xa6, 0xc7, - 0x2b, 0x6c, 0x7f, 0xb4, 0x5e, 0x1f, 0xc7, 0xcf, 0xc7, 0xc7, 0xf2, 0x7a, 0x66, 0xb3, 0xc0, 0x6f, - 0x0e, 0x87, 0x66, 0x7b, 0xb9, 0xa8, 0x17, 0xcf, 0x12, 0x40, 0xb3, 0x6d, 0x15, 0x15, 0xb6, 0xe9, - 0xa3, 0x77, 0x21, 0x1f, 0x11, 0x1f, 0x0b, 0x2b, 0xc2, 0xdf, 0x5c, 0xeb, 0xce, 0x72, 0x51, 0xdf, - 0xeb, 0x11, 0x1f, 0x9b, 0xed, 0xd7, 0xab, 0x95, 0xb5, 0x27, 0x84, 0x4c, 0x1f, 0x7d, 0x07, 0x0a, - 0x22, 0x39, 0xa4, 0x7c, 0x46, 0xca, 0xbf, 0xb3, 0x5c, 0xd4, 0xf3, 0x89, 0xe7, 0x42, 0x21, 0x5d, - 0x5a, 0x79, 0x96, 0x9c, 0xa6, 0xf1, 0x3b, 0x0d, 0xca, 0x83, 0x38, 0x0c, 0xb8, 0x4d, 0x83, 0xf1, - 0x18, 0x53, 0x64, 0x40, 0x31, 0xc4, 0xcf, 0xb8, 0xe3, 0x63, 0xe6, 0xc9, 0xa3, 0x95, 0x1e, 0x36, - 0xb6, 0x04, 0xc9, 0x72, 0xa3, 0x31, 0x6e, 0x63, 0xe6, 0xd1, 0x20, 0xe6, 0x84, 0xaa, 0x70, 0x15, - 0x84, 0xaa, 0xa0, 0xa2, 0xf7, 0x01, 0x68, 0x30, 0x9e, 0x28, 0x9c, 0xdd, 0x1b, 0xe2, 0x14, 0xa5, - 0xae, 0x20, 0x27, 0xb7, 0xfb, 0x41, 0xb6, 0x90, 0xd1, 0xb3, 0x8d, 0xdf, 0x6a, 0x50, 0xee, 0x62, - 0x3a, 0xc6, 0xff, 0xd7, 0xce, 0x36, 0xfe, 0xb8, 0x0b, 0x87, 0x67, 0x13, 0x21, 0x6b, 0xe1, 0x38, - 0x0c, 0x3c, 0x97, 0xad, 0xfd, 0x2d, 0x79, 0x92, 0xe1, 0xf0, 0x79, 0x8c, 0xa5, 0xc7, 0xfb, 0x0f, - 0xbf, 0xb9, 0xcd, 0x52, 0xa2, 0x98, 0xa0, 0xd8, 0xf3, 0x18, 0x5b, 0xe0, 0xad, 0xd6, 0xa8, 0x0d, - 0x79, 0x9a, 0x08, 0x28, 0x67, 0xdf, 0x02, 0x71, 0xc5, 0xdd, 0x54, 0x15, 0x0d, 0x41, 0x9f, 0xc5, - 0xbe, 0xcb, 0xb1, 0xef, 0x28, 0x12, 0xab, 0x66, 0xee, 0x65, 0x6e, 0x08, 0x77, 0x4b, 0x61, 0xa4, - 0x47, 0x45, 0xef, 0xc1, 0xad, 0x08, 0x5f, 0xf2, 0x14, 0x53, 0xe4, 0x62, 0x56, 0xe6, 0x62, 0x6d, - 0xb9, 0xa8, 0x57, 0x7a, 0xf8, 0x92, 0x2b, 0x51, 0x99, 0x91, 0xc5, 0xd5, 0xc6, 0xaa, 0x44, 0x1b, - 0x3c, 0xff, 0xa4, 0x20, 0xba, 0x89, 0x8c, 0xe7, 0x27, 0x1a, 0xdc, 0xee, 0x12, 0x3f, 0x78, 0x16, - 0x60, 0x5f, 0x74, 0xa9, 0x34, 0x9a, 0xdf, 0x06, 0xc4, 0xe6, 0x8c, 0xe3, 0xa9, 0xe3, 0x91, 0xe8, - 0x59, 0x30, 0x76, 0x58, 0xec, 0x46, 0x32, 0xa8, 0x05, 0x4b, 0x4f, 0x38, 0x67, 0x92, 0x21, 0x5b, - 0x9b, 0x01, 0x48, 0xd6, 0x52, 0x18, 0x5c, 0xe0, 0x08, 0x33, 0x96, 0x48, 0x27, 0xf1, 0xbb, 0xbb, - 0xe5, 0xc0, 0x42, 0xc9, 0xd2, 0x85, 0x4a, 0x47, 0x69, 0x08, 0x8a, 0xba, 0xe2, 0x7f, 0xec, 0xc2, - 0xa1, 0x19, 0x71, 0x4c, 0x23, 0x37, 0x3c, 0x23, 0xd3, 0xe9, 0xba, 0x7e, 0xda, 0x50, 0x61, 0xa2, - 0x9e, 0x1c, 0x9e, 0x10, 0x54, 0x5a, 0xd6, 0xb7, 0x5a, 0x58, 0xd7, 0x9d, 
0x55, 0x66, 0x9b, 0x55, - 0xd8, 0x86, 0xca, 0x54, 0x24, 0xfa, 0x0a, 0x65, 0xf7, 0x8d, 0x28, 0x9b, 0x05, 0x61, 0x95, 0xa7, - 0x9b, 0xe5, 0xf1, 0x73, 0xb8, 0xab, 0xd2, 0x2d, 0xbd, 0xe0, 0x15, 0x5e, 0x46, 0xe2, 0xdd, 0xdf, - 0x82, 0xb7, 0x35, 0x73, 0xad, 0x43, 0x6f, 0x6b, 0x42, 0x3f, 0x85, 0xc3, 0xa9, 0xba, 0x19, 0x19, - 0xcf, 0x15, 0x7e, 0x56, 0xe2, 0x7f, 0x6b, 0x9b, 0xbf, 0x57, 0x6f, 0xd2, 0xba, 0x3d, 0xbd, 0x4a, - 0x3c, 0x29, 0x7c, 0x9c, 0x8e, 0x92, 0x5f, 0x69, 0x70, 0xd0, 0x1f, 0x31, 0x4c, 0x2f, 0xb0, 0xbf, - 0x1a, 0x04, 0x9b, 0xcd, 0x51, 0xbb, 0x46, 0x73, 0xfc, 0x1f, 0x4c, 0x9a, 0xc2, 0x6a, 0xb8, 0x7d, - 0x9e, 0x83, 0x92, 0x4d, 0xdd, 0x88, 0xb9, 0x1e, 0x0f, 0x48, 0x84, 0x1e, 0x43, 0x56, 0x8c, 0x69, - 0x75, 0xd9, 0x0f, 0x36, 0x70, 0xd5, 0xb4, 0x6e, 0x26, 0x63, 0xba, 0x99, 0x4e, 0xeb, 0xa6, 0x7d, - 0x19, 0x75, 0x31, 0x77, 0x5b, 0x05, 0x61, 0xe4, 0xb3, 0x45, 0x5d, 0xb3, 0x24, 0x02, 0x42, 0x90, - 0x8d, 0xdc, 0x69, 0x32, 0x9f, 0x8a, 0x96, 0x5c, 0xa3, 0x1f, 0xc2, 0x1e, 0xe3, 0x2e, 0x9f, 0x31, - 0x19, 0xd6, 0xed, 0x1d, 0x63, 0xc3, 0x9b, 0x81, 0x94, 0xb5, 0x94, 0x0e, 0xfa, 0x00, 0xf6, 0x43, - 0x97, 0x71, 0x67, 0x82, 0x5d, 0xca, 0x47, 0xd8, 0xe5, 0xd5, 0xdc, 0xf5, 0x4f, 0x5f, 0x11, 0xaa, - 0x8f, 0x53, 0x4d, 0x81, 0x45, 0x68, 0x30, 0x76, 0xd6, 0x91, 0xdc, 0xbb, 0x01, 0x96, 0x50, 0x5d, - 0xdf, 0xdf, 0x63, 0xa8, 0x4c, 0xdd, 0xcb, 0x0d, 0xa8, 0xfc, 0xf5, 0xa1, 0xca, 0x53, 0xf7, 0x72, - 0x8d, 0xf4, 0x11, 0xdc, 0x26, 0x2a, 0x3d, 0xd6, 0x70, 0xac, 0x5a, 0x78, 0x63, 0x33, 0xbb, 0x92, - 0x4c, 0x0a, 0x16, 0x91, 0x2f, 0x32, 0x18, 0xaa, 0x42, 0xfe, 0x05, 0x0d, 0x78, 0x10, 0x8d, 0xab, - 0x45, 0xd9, 0x5a, 0xd2, 0x2d, 0xfa, 0x3e, 0xe4, 0x83, 0x88, 0xe3, 0x88, 0xb3, 0x6a, 0x49, 0x9a, - 0x7a, 0x53, 0x1b, 0x49, 0x3b, 0xaf, 0x92, 0x46, 0x0d, 0xa8, 0x08, 0x0c, 0xec, 0x70, 0x42, 0x1c, - 0x12, 0xfa, 0xd5, 0xb2, 0x04, 0x2e, 0x49, 0xa2, 0x4d, 0x48, 0x3f, 0xf4, 0x85, 0x0c, 0xc5, 0x9c, - 0xce, 0x1d, 0x12, 0x39, 0xf1, 0x8c, 0x4d, 0xaa, 0x95, 0x44, 0x46, 0x12, 0xfb, 0xd1, 0xf9, 0x8c, - 0x4d, 0xd0, 0x10, 0xee, 0xe0, 0x98, 0x78, 0x13, 0xe7, 0x97, 0x98, 0x92, 0x8d, 0x40, 0xee, 0x5f, - 0x3f, 0x90, 0x48, 0x02, 0x3c, 0xc5, 0x94, 0xac, 0x38, 0x5f, 0x78, 0xc7, 0xfd, 0x41, 0x83, 0x3d, - 0x53, 0x3a, 0x8e, 0xbe, 0x07, 0xd9, 0x55, 0x8b, 0x7d, 0xcb, 0x69, 0x37, 0x52, 0x5a, 0x88, 0xa3, - 0x16, 0x64, 0xf8, 0x65, 0xda, 0x6a, 0x6f, 0x52, 0x1b, 0x89, 0x8b, 0x42, 0x79, 0xa3, 0x04, 0x32, - 0x37, 0x2f, 0x01, 0xd5, 0xb4, 0xff, 0x95, 0x81, 0x5c, 0x07, 0xbb, 0x0c, 0xa3, 0x1f, 0x40, 0x8e, - 0x71, 0x97, 0x72, 0x75, 0x92, 0x6b, 0x45, 0x2a, 0xd1, 0x40, 0x3f, 0x03, 0xc0, 0x97, 0x71, 0x40, - 0x5d, 0x61, 0xe6, 0x7a, 0x7d, 0xa4, 0xf6, 0xcf, 0x45, 0xfd, 0x68, 0xe3, 0x2d, 0x78, 0xd2, 0xa0, - 0x6e, 0xe4, 0x47, 0xb3, 0x30, 0x74, 0x47, 0x21, 0x6e, 0x58, 0x1b, 0x80, 0x9b, 0xa3, 0x3d, 0xf3, - 0xdf, 0x8f, 0xf6, 0x19, 0xdc, 0xf5, 0x71, 0x4c, 0xb1, 0x27, 0xa7, 0xbb, 0x74, 0x5c, 0xfc, 0xb2, - 0x80, 0xa9, 0xc6, 0xfc, 0x25, 0x3d, 0x3e, 0x5c, 0xa3, 0x0f, 0x04, 0xf8, 0x40, 0x62, 0xa3, 0x1e, - 0x94, 0x62, 0x4a, 0x62, 0xc2, 0x44, 0x1d, 0xb2, 0xeb, 0xb5, 0x99, 0xfd, 0xe5, 0xa2, 0x0e, 0xe7, - 0x4a, 0xcb, 0x1e, 0x58, 0x90, 0x22, 0xd8, 0x0c, 0xdd, 0x81, 0x9c, 0x4c, 0x4f, 0xd9, 0x64, 0x32, - 0x56, 0xb2, 0x41, 0xef, 0x42, 0x81, 0xe1, 0x5f, 0xcc, 0x70, 0xe4, 0x61, 0xd9, 0x32, 0x32, 0xad, - 0x83, 0xd7, 0x8b, 0x7a, 0x45, 0xde, 0xec, 0x40, 0x31, 0xac, 0x95, 0x48, 0xf2, 0x8e, 0xf8, 0x54, - 0xbd, 0x23, 0xf6, 0x4f, 0x47, 0x84, 0x72, 0x91, 0xa5, 0x46, 0xc4, 0xe9, 0xfc, 0x6d, 0x5f, 0x0a, - 0x5f, 0x7e, 0x5e, 0xa0, 0x23, 0x28, 0xc4, 0x34, 0x20, 0x34, 0xe0, 0xc9, 0x27, 0x55, 0xce, 0x5a, - 
0xed, 0x4f, 0xb2, 0xc2, 0xa5, 0x07, 0xbf, 0xd7, 0xa0, 0x28, 0xbf, 0x37, 0xe4, 0xbb, 0xae, 0x04, - 0xf9, 0x61, 0xef, 0x49, 0xaf, 0xff, 0x93, 0x9e, 0xbe, 0x83, 0x0a, 0x90, 0xed, 0x0d, 0x3b, 0x1d, - 0x3d, 0x8f, 0xf2, 0x90, 0x31, 0x7b, 0xb6, 0xae, 0xa1, 0x22, 0xe4, 0xde, 0xeb, 0xf4, 0x4f, 0x6d, - 0x7d, 0x57, 0x2c, 0x5b, 0x3f, 0xb5, 0x8d, 0x81, 0x9e, 0x41, 0xb7, 0xe1, 0x56, 0xdb, 0xe8, 0x98, - 0x5d, 0xd3, 0x36, 0xda, 0x4e, 0x42, 0x2c, 0x08, 0x6d, 0xdb, 0xec, 0x1a, 0x7a, 0x56, 0x80, 0xb6, - 0x8d, 0x33, 0xb3, 0x7b, 0xda, 0xd1, 0x73, 0xe8, 0x10, 0x0e, 0xd6, 0xb2, 0x29, 0xb9, 0x88, 0xca, - 0x50, 0x68, 0x0f, 0xad, 0x53, 0xdb, 0xec, 0xf7, 0xf4, 0x3d, 0x81, 0x6d, 0x0f, 0xcf, 0x3b, 0x86, - 0x0e, 0x68, 0x1f, 0x40, 0xc0, 0x0c, 0x0c, 0xcb, 0x34, 0x06, 0xba, 0xff, 0xe0, 0x47, 0x70, 0x70, - 0xe5, 0x69, 0x8a, 0x6e, 0x41, 0xe9, 0xb4, 0xdd, 0x76, 0x2c, 0xe3, 0xbc, 0x63, 0x9e, 0x9d, 0xea, - 0x3b, 0x08, 0xc1, 0xbe, 0x65, 0x74, 0xfb, 0x1f, 0x1a, 0x2b, 0x9a, 0x76, 0x94, 0xfd, 0xf8, 0x37, - 0xb5, 0x9d, 0x07, 0x2d, 0x38, 0xb8, 0x52, 0xa5, 0xc2, 0xc3, 0x73, 0xa3, 0xd7, 0x36, 0x7b, 0xef, - 0xeb, 0x3b, 0xa8, 0x02, 0xc5, 0xb3, 0x7e, 0xb7, 0x6b, 0xda, 0xb6, 0xd1, 0xd6, 0x35, 0xc1, 0x3b, - 0x6d, 0xf5, 0x2d, 0xb1, 0xd9, 0x4d, 0x30, 0x5a, 0x5f, 0x7f, 0xf5, 0xb7, 0xda, 0xce, 0xab, 0x65, - 0x4d, 0xfb, 0x6c, 0x59, 0xd3, 0xfe, 0xbc, 0xac, 0x69, 0x7f, 0x5d, 0xd6, 0xb4, 0x4f, 0xfe, 0x5e, - 0xdb, 0x79, 0x9a, 0x57, 0x05, 0x32, 0xda, 0x93, 0x5f, 0xb5, 0x8f, 0xfe, 0x1d, 0x00, 0x00, 0xff, - 0xff, 0xb4, 0xd4, 0xd6, 0x8a, 0x6b, 0x0f, 0x00, 0x00, + 0xed, 0x66, 0xc7, 0x89, 0x66, 0x08, 0x07, 0x2e, 0x76, 0xd5, 0xfb, 0xf8, 0xbd, 0x57, 0xaf, 0xde, + 0x47, 0x35, 0x20, 0x4a, 0x1c, 0x77, 0x1a, 0x8d, 0x8f, 0x3d, 0x87, 0x3b, 0xcd, 0x88, 0x12, 0x4e, + 0xd0, 0x81, 0x4b, 0xdc, 0xa7, 0x92, 0xde, 0x54, 0xdc, 0xa3, 0x77, 0x12, 0xb1, 0x00, 0x73, 0x67, + 0x23, 0x7a, 0xf4, 0x2e, 0xe3, 0x84, 0x3a, 0x13, 0x7c, 0x8c, 0xc3, 0x89, 0x1f, 0x26, 0x7f, 0x42, + 0xee, 0x99, 0xeb, 0x3e, 0x50, 0x42, 0xd5, 0x39, 0xf7, 0x67, 0xc7, 0xd3, 0x99, 0x7b, 0xcc, 0xfd, + 0x00, 0x33, 0xee, 0x04, 0x91, 0xe2, 0xdc, 0x9a, 0x90, 0x09, 0x91, 0xcb, 0x63, 0xb1, 0x8a, 0xa9, + 0x8d, 0x1f, 0x43, 0x76, 0x14, 0x39, 0x21, 0xfa, 0x0a, 0x64, 0x9e, 0xe2, 0x45, 0x35, 0x73, 0x47, + 0xbb, 0x5b, 0x6e, 0xe5, 0x5f, 0x2d, 0xeb, 0x99, 0x47, 0x78, 0x61, 0x0a, 0x1a, 0xba, 0x03, 0x79, + 0x1c, 0x7a, 0xb6, 0x60, 0x67, 0xff, 0x93, 0xbd, 0x87, 0x43, 0xef, 0x11, 0x5e, 0x9c, 0x94, 0x7f, + 0xfd, 0xa2, 0xbe, 0xf3, 0xd9, 0x8b, 0xba, 0xf6, 0xc5, 0x8b, 0xba, 0xd6, 0x08, 0x20, 0xf7, 0x91, + 0x33, 0x9b, 0x63, 0xf4, 0x55, 0x28, 0x52, 0xe7, 0xb9, 0x3d, 0x5e, 0x70, 0xcc, 0xaa, 0x9a, 0x50, + 0x35, 0x0b, 0xd4, 0x79, 0xde, 0x12, 0x7b, 0x74, 0x0a, 0xc5, 0xb5, 0x87, 0xd5, 0xdd, 0x3b, 0xda, + 0xdd, 0xd2, 0xfd, 0xaf, 0x35, 0x37, 0xc1, 0x10, 0xc7, 0x68, 0x4e, 0x67, 0x6e, 0xd3, 0x4a, 0x84, + 0x5a, 0xd9, 0x97, 0xcb, 0xfa, 0x8e, 0xb9, 0xd1, 0x3a, 0xc9, 0x4a, 0x73, 0x1f, 0x43, 0xe1, 0x11, + 0x5e, 0xc4, 0x16, 0xd5, 0x29, 0xb4, 0x2d, 0xa7, 0xf8, 0x0e, 0xe4, 0x9e, 0x09, 0x19, 0x65, 0xab, + 0xda, 0xbc, 0x14, 0xf8, 0xa6, 0xc4, 0x50, 0x66, 0x62, 0xe1, 0xc6, 0x9f, 0x34, 0x80, 0x11, 0x27, + 0x14, 0x1b, 0x1e, 0x0e, 0x39, 0x9a, 0x00, 0xb8, 0xb3, 0x39, 0xe3, 0x98, 0xda, 0xbe, 0xa7, 0xcc, + 0x3c, 0x14, 0xf2, 0x7f, 0x59, 0xd6, 0x1f, 0x4c, 0x7c, 0x3e, 0x9d, 0x8f, 0x9b, 0x2e, 0x09, 0x8e, + 0xd7, 0xd8, 0xde, 0x78, 0xb3, 0x3e, 0x8e, 0x9e, 0x4e, 0x8e, 0xe5, 0xf5, 0xcc, 0xe7, 0xbe, 0xd7, + 0x3c, 0x3f, 0x37, 0x3a, 0xab, 0x65, 0xbd, 0xd8, 0x8e, 0x01, 0x8d, 0x8e, 0x59, 0x54, 0xd8, 0x86, + 0x87, 0xde, 0x83, 0x7c, 0x48, 0x3c, 0x2c, 0xac, 0x08, 0x7f, 0x73, 
0xad, 0x5b, 0xab, 0x65, 0x7d, + 0x6f, 0x40, 0x3c, 0x6c, 0x74, 0x5e, 0xad, 0x57, 0xe6, 0x9e, 0x10, 0x32, 0x3c, 0xf4, 0x6d, 0x28, + 0x88, 0xe4, 0x90, 0xf2, 0x19, 0x29, 0xff, 0xce, 0x6a, 0x59, 0xcf, 0xc7, 0x9e, 0x0b, 0x85, 0x64, + 0x69, 0xe6, 0x59, 0x7c, 0x9a, 0xc6, 0xef, 0x34, 0x28, 0x8f, 0xa2, 0x99, 0xcf, 0x2d, 0xea, 0x4f, + 0x26, 0x98, 0xa2, 0x2e, 0x14, 0x67, 0xf8, 0x09, 0xb7, 0x3d, 0xcc, 0x5c, 0x79, 0xb4, 0xd2, 0xfd, + 0xc6, 0x96, 0x20, 0x99, 0x4e, 0x38, 0xc1, 0x1d, 0xcc, 0x5c, 0xea, 0x47, 0x9c, 0x50, 0x15, 0xae, + 0x82, 0x50, 0x15, 0x54, 0xf4, 0x01, 0x00, 0xf5, 0x27, 0x53, 0x85, 0xb3, 0x7b, 0x4d, 0x9c, 0xa2, + 0xd4, 0x15, 0xe4, 0xf8, 0x76, 0x3f, 0xcc, 0x16, 0x32, 0x7a, 0xb6, 0xf1, 0x5b, 0x0d, 0xca, 0x7d, + 0x4c, 0x27, 0xf8, 0xff, 0xda, 0xd9, 0xc6, 0x1f, 0x77, 0xe1, 0xb0, 0x3d, 0x15, 0xb2, 0x26, 0x8e, + 0x66, 0xbe, 0xeb, 0xb0, 0x8d, 0xbf, 0x25, 0x57, 0x32, 0x6c, 0xbe, 0x88, 0xb0, 0xf4, 0x78, 0xff, + 0xfe, 0x37, 0xb6, 0x59, 0x8a, 0x15, 0x63, 0x14, 0x6b, 0x11, 0x61, 0x13, 0xdc, 0xf5, 0x1a, 0x75, + 0x20, 0x4f, 0x63, 0x01, 0xe5, 0xec, 0x1b, 0x20, 0x2e, 0xb9, 0x9b, 0xa8, 0xa2, 0x73, 0xd0, 0xe7, + 0x91, 0xe7, 0x70, 0xec, 0xd9, 0x8a, 0xc4, 0xaa, 0x99, 0x3b, 0x99, 0x6b, 0xc2, 0xdd, 0x50, 0x18, + 0xc9, 0x51, 0xd1, 0xfb, 0x70, 0x23, 0xc4, 0x17, 0x3c, 0xc1, 0x14, 0xb9, 0x98, 0x95, 0xb9, 0x58, + 0x5b, 0x2d, 0xeb, 0x95, 0x01, 0xbe, 0xe0, 0x4a, 0x54, 0x66, 0x64, 0x71, 0xbd, 0x31, 0x2b, 0x61, + 0x8a, 0xe7, 0x9d, 0x14, 0x44, 0x37, 0x91, 0xf1, 0xfc, 0x54, 0x83, 0x9b, 0x7d, 0xe2, 0xf9, 0x4f, + 0x7c, 0xec, 0x89, 0x2e, 0x95, 0x44, 0xf3, 0x5b, 0x80, 0xd8, 0x82, 0x71, 0x1c, 0xd8, 0x2e, 0x09, + 0x9f, 0xf8, 0x13, 0x9b, 0x45, 0x4e, 0x28, 0x83, 0x5a, 0x30, 0xf5, 0x98, 0xd3, 0x96, 0x0c, 0xd9, + 0xda, 0xba, 0x80, 0x64, 0x2d, 0xcd, 0xfc, 0x67, 0x38, 0xc4, 0x8c, 0xc5, 0xd2, 0x71, 0xfc, 0x6e, + 0x6f, 0x39, 0xb0, 0x50, 0x32, 0x75, 0xa1, 0xd2, 0x53, 0x1a, 0x82, 0xa2, 0xae, 0xf8, 0x1f, 0xbb, + 0x70, 0x68, 0x84, 0x1c, 0xd3, 0xd0, 0x99, 0xb5, 0x49, 0x10, 0x6c, 0xea, 0xa7, 0x03, 0x15, 0x26, + 0xea, 0xc9, 0xe6, 0x31, 0x41, 0xa5, 0x65, 0x7d, 0xab, 0x85, 0x4d, 0xdd, 0x99, 0x65, 0x96, 0xae, + 0xc2, 0x0e, 0x54, 0x02, 0x91, 0xe8, 0x6b, 0x94, 0xdd, 0xd7, 0xa2, 0xa4, 0x0b, 0xc2, 0x2c, 0x07, + 0xe9, 0xf2, 0xf8, 0x39, 0xdc, 0x56, 0xe9, 0x96, 0x5c, 0xf0, 0x1a, 0x2f, 0x23, 0xf1, 0xee, 0x6e, + 0xc1, 0xdb, 0x9a, 0xb9, 0xe6, 0xa1, 0xbb, 0x35, 0xa1, 0x1f, 0xc3, 0x61, 0xa0, 0x6e, 0x46, 0xc6, + 0x73, 0x8d, 0x9f, 0x95, 0xf8, 0xdf, 0xdc, 0xe6, 0xef, 0xe5, 0x9b, 0x34, 0x6f, 0x06, 0x97, 0x89, + 0x27, 0x85, 0x4f, 0x92, 0x51, 0xf2, 0x2b, 0x0d, 0x0e, 0x86, 0x63, 0x86, 0xe9, 0x33, 0xec, 0xad, + 0x07, 0x41, 0xba, 0x39, 0x6a, 0x57, 0x68, 0x8e, 0xff, 0x83, 0x49, 0x53, 0x58, 0x0f, 0xb7, 0x2f, + 0x72, 0x50, 0xb2, 0xa8, 0x13, 0x32, 0xc7, 0xe5, 0x3e, 0x09, 0xd1, 0x43, 0xc8, 0x8a, 0x31, 0xad, + 0x2e, 0xfb, 0x5e, 0x0a, 0x57, 0x4d, 0xeb, 0x66, 0x3c, 0xa6, 0x9b, 0xc9, 0xb4, 0x6e, 0x5a, 0x17, + 0x61, 0x1f, 0x73, 0xa7, 0x55, 0x10, 0x46, 0x3e, 0x5f, 0xd6, 0x35, 0x53, 0x22, 0x20, 0x04, 0xd9, + 0xd0, 0x09, 0xe2, 0xf9, 0x54, 0x34, 0xe5, 0x1a, 0xfd, 0x10, 0xf6, 0x18, 0x77, 0xf8, 0x9c, 0xc9, + 0xb0, 0x6e, 0xef, 0x18, 0x29, 0x6f, 0x46, 0x52, 0xd6, 0x54, 0x3a, 0xe8, 0x43, 0xd8, 0x9f, 0x39, + 0x8c, 0xdb, 0x53, 0xec, 0x50, 0x3e, 0xc6, 0x0e, 0xaf, 0xe6, 0xae, 0x7e, 0xfa, 0x8a, 0x50, 0x7d, + 0x98, 0x68, 0x0a, 0x2c, 0x42, 0xfd, 0x89, 0xbd, 0x89, 0xe4, 0xde, 0x35, 0xb0, 0x84, 0xea, 0xe6, + 0xfe, 0x1e, 0x42, 0x25, 0x70, 0x2e, 0x52, 0x50, 0xf9, 0xab, 0x43, 0x95, 0x03, 0xe7, 0x62, 0x83, + 0xf4, 0x31, 0xdc, 0x24, 0x2a, 0x3d, 0x36, 0x70, 0xac, 0x5a, 0x78, 0x6d, 0x33, 0xbb, 0x94, 
0x4c, + 0x0a, 0x16, 0x91, 0x2f, 0x33, 0x18, 0xaa, 0x42, 0xfe, 0x39, 0xf5, 0xb9, 0x1f, 0x4e, 0xaa, 0x45, + 0xd9, 0x5a, 0x92, 0x2d, 0xfa, 0x3e, 0xe4, 0xfd, 0x90, 0xe3, 0x90, 0xb3, 0x6a, 0x49, 0x9a, 0x7a, + 0x5d, 0x1b, 0x49, 0x3a, 0xaf, 0x92, 0x46, 0x0d, 0xa8, 0x08, 0x0c, 0x6c, 0x73, 0x42, 0x6c, 0x32, + 0xf3, 0xaa, 0x65, 0x09, 0x5c, 0x92, 0x44, 0x8b, 0x90, 0xe1, 0xcc, 0x13, 0x32, 0x14, 0x73, 0xba, + 0xb0, 0x49, 0x68, 0x47, 0x73, 0x36, 0xad, 0x56, 0x62, 0x19, 0x49, 0x1c, 0x86, 0x67, 0x73, 0x36, + 0x45, 0xe7, 0x70, 0x0b, 0x47, 0xc4, 0x9d, 0xda, 0xbf, 0xc4, 0x94, 0xa4, 0x02, 0xb9, 0x7f, 0xf5, + 0x40, 0x22, 0x09, 0xf0, 0x18, 0x53, 0xb2, 0xe6, 0x7c, 0xe9, 0x1d, 0xf7, 0x07, 0x0d, 0xf6, 0x0c, + 0xe9, 0x38, 0xfa, 0x2e, 0x64, 0xd7, 0x2d, 0xf6, 0x0d, 0xa7, 0x4d, 0xa5, 0xb4, 0x10, 0x47, 0x2d, + 0xc8, 0xf0, 0x8b, 0xa4, 0xd5, 0x5e, 0xa7, 0x36, 0x62, 0x17, 0x85, 0x72, 0xaa, 0x04, 0x32, 0xd7, + 0x2f, 0x01, 0xd5, 0xb4, 0xff, 0x95, 0x81, 0x5c, 0x0f, 0x3b, 0x0c, 0xa3, 0x1f, 0x40, 0x8e, 0x71, + 0x87, 0x72, 0x75, 0x92, 0x2b, 0x45, 0x2a, 0xd6, 0x40, 0x3f, 0x03, 0xc0, 0x17, 0x91, 0x4f, 0x1d, + 0x61, 0xe6, 0x6a, 0x7d, 0xa4, 0xf6, 0xcf, 0x65, 0xfd, 0x28, 0xf5, 0x16, 0x3c, 0x69, 0x50, 0x27, + 0xf4, 0xc2, 0xf9, 0x6c, 0xe6, 0x8c, 0x67, 0xb8, 0x61, 0xa6, 0x00, 0xd3, 0xa3, 0x3d, 0xf3, 0xdf, + 0x8f, 0xf6, 0x39, 0xdc, 0xf6, 0x70, 0x44, 0xb1, 0x2b, 0xa7, 0xbb, 0x74, 0x5c, 0xfc, 0x32, 0x9f, + 0xa9, 0xc6, 0xfc, 0x96, 0x1e, 0x1f, 0x6e, 0xd0, 0x47, 0x02, 0x7c, 0x24, 0xb1, 0xd1, 0x00, 0x4a, + 0x11, 0x25, 0x11, 0x61, 0xa2, 0x0e, 0xd9, 0xd5, 0xda, 0xcc, 0xfe, 0x6a, 0x59, 0x87, 0x33, 0xa5, + 0x65, 0x8d, 0x4c, 0x48, 0x10, 0x2c, 0x86, 0x6e, 0x41, 0x4e, 0xa6, 0xa7, 0x6c, 0x32, 0x19, 0x33, + 0xde, 0xa0, 0xf7, 0xa0, 0xc0, 0xf0, 0x2f, 0xe6, 0x38, 0x74, 0xb1, 0x6c, 0x19, 0x99, 0xd6, 0xc1, + 0xab, 0x65, 0xbd, 0x22, 0x6f, 0x76, 0xa4, 0x18, 0xe6, 0x5a, 0x24, 0x7e, 0x47, 0x7c, 0xa6, 0xde, + 0x11, 0xfb, 0xa7, 0x63, 0x42, 0xb9, 0xc8, 0xd2, 0x6e, 0xc8, 0xe9, 0xe2, 0x4d, 0x5f, 0x0a, 0x6f, + 0x3f, 0x2f, 0xd0, 0x11, 0x14, 0x22, 0xea, 0x13, 0xea, 0xf3, 0xf8, 0x93, 0x2a, 0x67, 0xae, 0xf7, + 0x27, 0x59, 0xe9, 0x92, 0x78, 0xd1, 0x5a, 0x17, 0x61, 0x9b, 0x10, 0xea, 0x89, 0x94, 0x47, 0xdf, + 0x8b, 0x6b, 0x25, 0xce, 0xcb, 0xda, 0x9b, 0x93, 0x3c, 0x5d, 0x1f, 0xa9, 0x5e, 0xb4, 0x7b, 0xad, + 0x5e, 0xf4, 0x2e, 0x54, 0x5c, 0x12, 0x04, 0x4e, 0xe8, 0xd9, 0x2e, 0x99, 0x87, 0x5c, 0x39, 0x5a, + 0x56, 0xc4, 0xb6, 0xa0, 0xdd, 0xfb, 0xbd, 0x06, 0x45, 0xf9, 0x59, 0x24, 0x9f, 0x9f, 0x25, 0xc8, + 0x9f, 0x0f, 0x1e, 0x0d, 0x86, 0x3f, 0x19, 0xe8, 0x3b, 0xa8, 0x00, 0xd9, 0xc1, 0x79, 0xaf, 0xa7, + 0xe7, 0x51, 0x1e, 0x32, 0xc6, 0xc0, 0xd2, 0x35, 0x54, 0x84, 0xdc, 0xfb, 0xbd, 0xe1, 0xa9, 0xa5, + 0xef, 0x8a, 0x65, 0xeb, 0xa7, 0x56, 0x77, 0xa4, 0x67, 0xd0, 0x4d, 0xb8, 0xd1, 0xe9, 0xf6, 0x8c, + 0xbe, 0x61, 0x75, 0x3b, 0x76, 0x4c, 0x2c, 0x08, 0x6d, 0xcb, 0xe8, 0x77, 0xf5, 0xac, 0x00, 0xed, + 0x74, 0xdb, 0x46, 0xff, 0xb4, 0xa7, 0xe7, 0xd0, 0x21, 0x1c, 0x6c, 0x64, 0x13, 0x72, 0x11, 0x95, + 0xa1, 0xd0, 0x39, 0x37, 0x4f, 0x2d, 0x63, 0x38, 0xd0, 0xf7, 0x04, 0xb6, 0x75, 0x7e, 0xd6, 0xeb, + 0xea, 0x80, 0xf6, 0x01, 0x04, 0xcc, 0xa8, 0x6b, 0x1a, 0xdd, 0x91, 0xee, 0xdd, 0xfb, 0x11, 0x1c, + 0x5c, 0x7a, 0x41, 0xa3, 0x1b, 0x50, 0x3a, 0xed, 0x74, 0x6c, 0xb3, 0x7b, 0xd6, 0x33, 0xda, 0xa7, + 0xfa, 0x0e, 0x42, 0xb0, 0x6f, 0x76, 0xfb, 0xc3, 0x8f, 0xba, 0x6b, 0x9a, 0x76, 0x94, 0xfd, 0xe4, + 0x37, 0xb5, 0x9d, 0x7b, 0x2d, 0x38, 0xb8, 0xd4, 0x4c, 0x84, 0x87, 0x67, 0xdd, 0x41, 0xc7, 0x18, + 0x7c, 0xa0, 0xef, 0xa0, 0x0a, 0x14, 0xdb, 0xc3, 0x7e, 0xdf, 0xb0, 0xac, 0x6e, 0x47, 0xd7, 0x04, + 0xef, 0xb4, 0x35, 
0x34, 0xc5, 0x66, 0x37, 0xc6, 0x68, 0x7d, 0xfd, 0xe5, 0xdf, 0x6a, 0x3b, 0x2f, + 0x57, 0x35, 0xed, 0xf3, 0x55, 0x4d, 0xfb, 0xf3, 0xaa, 0xa6, 0xfd, 0x75, 0x55, 0xd3, 0x3e, 0xfd, + 0x7b, 0x6d, 0xe7, 0x71, 0x5e, 0xdd, 0xc7, 0x78, 0x4f, 0x7e, 0x7c, 0x3f, 0xf8, 0x77, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xb6, 0xbd, 0xd5, 0x58, 0x12, 0x10, 0x00, 0x00, } diff --git a/pkg/roachpb/data.proto b/pkg/roachpb/data.proto index fb34fb488015..079835cfca31 100644 --- a/pkg/roachpb/data.proto +++ b/pkg/roachpb/data.proto @@ -422,3 +422,19 @@ message AbortSpanEntry { // The priority of the transaction. int32 priority = 3; } + +// TxnCoordMeta is metadata held by a transaction coordinator. This +// message is defined here because it is used in several layers of the +// system (internal/client, sql/distsqlrun, kv). +message TxnCoordMeta { + // txn is a copy of the transaction record, updated with each request. + Transaction txn = 1 [(gogoproto.nullable) = false]; + // intents stores key spans affected by this transaction through + // this coordinator. These spans allow the coordinator to set the + // list of intent spans in the EndTransactionRequest when the + // transaction is finalized. + repeated Span intents = 2 [(gogoproto.nullable) = false]; + // command_count indicates how many requests have been sent through + // this transaction. Reset on retryable txn errors. + int32 command_count = 3; +} diff --git a/pkg/server/node.go b/pkg/server/node.go index fc44ae7cad07..ba2af8b7034d 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -190,8 +190,8 @@ func bootstrapCluster( cfg.AmbientCtx.Tracer = tr // Create a KV DB with a local sender. stores := storage.NewStores(cfg.AmbientCtx, cfg.Clock, cfg.Settings.Version.MinSupportedVersion, cfg.Settings.Version.ServerVersion) - sender := kv.NewTxnCoordSender(cfg.AmbientCtx, cfg.Settings, stores, cfg.Clock, false, stopper, txnMetrics) - cfg.DB = client.NewDB(sender, cfg.Clock) + tcsFactory := kv.NewTxnCoordSenderFactory(cfg.AmbientCtx, cfg.Settings, stores, cfg.Clock, false /* linearizable */, stopper, txnMetrics) + cfg.DB = client.NewDB(tcsFactory, cfg.Clock) cfg.Transport = storage.NewDummyRaftTransport(cfg.Settings) if err := cfg.Settings.InitializeVersion(bootstrapVersion); err != nil { return uuid.UUID{}, errors.Wrap(err, "while initializing cluster version") diff --git a/pkg/server/node_test.go b/pkg/server/node_test.go index 5331fb844e25..46ef8d9822f6 100644 --- a/pkg/server/node_test.go +++ b/pkg/server/node_test.go @@ -85,16 +85,16 @@ func createTestNode( RPCContext: nodeRPCContext, RPCRetryOptions: &retryOpts, }, cfg.Gossip) - sender := kv.NewTxnCoordSender( + tsf := kv.NewTxnCoordSenderFactory( cfg.AmbientCtx, st, distSender, cfg.Clock, - false, + false, /* linearizable */ stopper, kv.MakeTxnMetrics(metric.TestSampleInterval), ) - cfg.DB = client.NewDB(sender, cfg.Clock) + cfg.DB = client.NewDB(tsf, cfg.Clock) cfg.Transport = storage.NewDummyRaftTransport(st) active, renewal := cfg.NodeLivenessDurations() cfg.HistogramWindowInterval = metric.TestSampleInterval diff --git a/pkg/server/server.go b/pkg/server/server.go index f457c42fb2c7..795e5e7e7058 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -118,7 +118,7 @@ type Server struct { gossip *gossip.Gossip nodeLiveness *storage.NodeLiveness storePool *storage.StorePool - txnCoordSender *kv.TxnCoordSender + tcsFactory *kv.TxnCoordSenderFactory distSender *kv.DistSender db *client.DB pgServer *pgwire.Server @@ -247,7 +247,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { 
txnMetrics := kv.MakeTxnMetrics(s.cfg.HistogramWindowInterval()) s.registry.AddMetricStruct(txnMetrics) - s.txnCoordSender = kv.NewTxnCoordSender( + s.tcsFactory = kv.NewTxnCoordSenderFactory( s.cfg.AmbientCtx, st, s.distSender, @@ -256,7 +256,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { s.stopper, txnMetrics, ) - s.db = client.NewDB(s.txnCoordSender, s.clock) + s.db = client.NewDB(s.tcsFactory, s.clock) nlActive, nlRenewal := s.cfg.NodeLivenessDurations() @@ -405,11 +405,10 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { Settings: st, DB: s.db, Executor: sqlExecutor, - // DistSQL also uses a DB that bypasses the TxnCoordSender. - FlowDB: client.NewDB(s.distSender, s.clock), - RPCContext: s.rpcContext, - Stopper: s.stopper, - NodeID: &s.nodeIDContainer, + FlowDB: client.NewDB(s.tcsFactory, s.clock), + RPCContext: s.rpcContext, + Stopper: s.stopper, + NodeID: &s.nodeIDContainer, TempStorage: tempEngine, DiskMonitor: s.cfg.TempStorageConfig.Mon, diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index df12835d6e79..c49be6b8f526 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -94,7 +94,7 @@ func TestServerStartClock(t *testing.T) { Span: roachpb.Span{Key: roachpb.Key("a")}, } if _, err := client.SendWrapped( - context.Background(), s.KVClient().(*client.DB).GetSender(), get, + context.Background(), s.DB().GetSender(), get, ); err != nil { t.Fatal(err) } diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index a8bf6754f614..26572f6b3bb0 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -533,9 +533,6 @@ func (ts *TestServer) MustGetSQLNetworkCounter(name string) int64 { return c } -// KVClient is part of TestServerInterface. -func (ts *TestServer) KVClient() interface{} { return ts.db } - // LeaseManager is part of TestServerInterface. func (ts *TestServer) LeaseManager() interface{} { return ts.leaseMgr @@ -589,7 +586,7 @@ func (ts *TestServer) GetFirstStoreID() roachpb.StoreID { // LookupRange returns the descriptor of the range containing key. 
func (ts *TestServer) LookupRange(key roachpb.Key) (roachpb.RangeDescriptor, error) { rs, _, err := client.RangeLookupForVersion(context.Background(), ts.ClusterSettings(), - ts.DistSender(), key, roachpb.CONSISTENT, 0 /* prefetchNum */, false /* reverse */) + ts.DB().GetSender(), key, roachpb.CONSISTENT, 0 /* prefetchNum */, false /* reverse */) if err != nil { return roachpb.RangeDescriptor{}, errors.Errorf( "%q: lookup range unexpected error: %s", key, err) @@ -618,7 +615,7 @@ func (ts *TestServer) SplitRange( }, SplitKey: splitKey, } - _, pErr := client.SendWrapped(ctx, ts.DistSender(), &splitReq) + _, pErr := client.SendWrapped(ctx, ts.DB().GetSender(), &splitReq) if pErr != nil { return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, errors.Errorf( diff --git a/pkg/sql/create_test.go b/pkg/sql/create_test.go index d92f5bab8293..bc4156297cc9 100644 --- a/pkg/sql/create_test.go +++ b/pkg/sql/create_test.go @@ -22,7 +22,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -225,7 +224,7 @@ func verifyTables( for id := range completed { count++ tableName := fmt.Sprintf("table_%d", id) - kvDB := tc.Servers[count%tc.NumServers()].KVClient().(*client.DB) + kvDB := tc.Servers[count%tc.NumServers()].DB() tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", tableName) if tableDesc.ID < descIDStart { t.Fatalf( @@ -253,7 +252,7 @@ func verifyTables( // Check that no extra descriptors have been written in the range // descIDStart..maxID. - kvDB := tc.Servers[0].KVClient().(*client.DB) + kvDB := tc.Servers[0].DB() for id := descIDStart; id < maxID; id++ { if _, ok := tableIDs[id]; ok { continue @@ -286,7 +285,7 @@ func TestParallelCreateTables(t *testing.T) { t.Fatal(err) } // Get the id descriptor generator count. - kvDB := tc.Servers[0].KVClient().(*client.DB) + kvDB := tc.Servers[0].DB() var descIDStart sqlbase.ID if descID, err := kvDB.Get(context.Background(), keys.DescIDGenerator); err != nil { t.Fatal(err) @@ -340,7 +339,7 @@ func TestParallelCreateConflictingTables(t *testing.T) { } // Get the id descriptor generator count. - kvDB := tc.Servers[0].KVClient().(*client.DB) + kvDB := tc.Servers[0].DB() var descIDStart sqlbase.ID if descID, err := kvDB.Get(context.Background(), keys.DescIDGenerator); err != nil { t.Fatal(err) diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index b72b37a510be..731c30efee7b 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -80,7 +80,7 @@ type DistSQLPlanner struct { // pool of workers. runnerChan chan runnerRequest - // gossip handle use to check node version compatibility + // gossip handle used to check node version compatibility. gossip *gossip.Gossip } @@ -90,7 +90,7 @@ const resolverPolicy = distsqlplan.BinPackingLeaseHolderChoice // debugging). var logPlanDiagram = envutil.EnvOrDefaultBool("COCKROACH_DISTSQL_LOG_PLAN", false) -// If true, for index joins we instantiate a join reader on every node that +// If true, for index joins we instantiate a join reader on every node that // has a stream (usually from a table reader). If false, there is a single join // reader. 
var distributeIndexJoin = settings.RegisterBoolSetting(
diff --git a/pkg/sql/distsql_physical_planner_test.go b/pkg/sql/distsql_physical_planner_test.go
index e4f0815fcbca..b01f2f108f11 100644
--- a/pkg/sql/distsql_physical_planner_test.go
+++ b/pkg/sql/distsql_physical_planner_test.go
@@ -110,7 +110,7 @@ func TestPlanningDuringSplits(t *testing.T) {
 	// Start a worker that continuously performs splits in the background.
 	tc.Stopper().RunWorker(context.TODO(), func(ctx context.Context) {
 		rng, _ := randutil.NewPseudoRand()
-		cdb := tc.Server(0).KVClient().(*client.DB)
+		cdb := tc.Server(0).DB()
 		for {
 			select {
 			case <-tc.Stopper().ShouldStop():
diff --git a/pkg/sql/distsql_plan_backfill_test.go b/pkg/sql/distsql_plan_backfill_test.go
index f6d1604b52da..fccbd33f4749 100644
--- a/pkg/sql/distsql_plan_backfill_test.go
+++ b/pkg/sql/distsql_plan_backfill_test.go
@@ -20,7 +20,6 @@ import (
 	"time"

 	"github.com/cockroachdb/cockroach/pkg/base"
-	"github.com/cockroachdb/cockroach/pkg/internal/client"
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
 	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
 	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
@@ -63,7 +62,7 @@ func TestDistBackfill(t *testing.T) {
 		},
 	})
 	defer tc.Stopper().Stop(context.TODO())
-	cdb := tc.Server(0).KVClient().(*client.DB)
+	cdb := tc.Server(0).DB()
 	sqlutils.CreateTable(
 		t, tc.ServerConn(0), "numtosquare", "x INT PRIMARY KEY, xsquared INT",
diff --git a/pkg/sql/distsql_running.go b/pkg/sql/distsql_running.go
index fc08060ffe5a..30c5971829d0 100644
--- a/pkg/sql/distsql_running.go
+++ b/pkg/sql/distsql_running.go
@@ -242,10 +242,10 @@ type distSQLReceiver struct {
 	// Once set, no more rows are accepted.
 	err error

-	// canceled is atomically set to 1 when this distSQL receiver has been marked
-	// as canceled. Upon the next Push(), err is set to a non-nil
-	// value, and ConsumerClosed is the ConsumerStatus.
-	canceled int32
+	// canceled is atomically set to an error when this distSQL receiver
+	// has been marked as canceled. Upon the next Push(), err is set to
+	// this value, and ConsumerClosed is the ConsumerStatus.
+	canceled atomic.Value

 	row    tree.Datums
 	status distsqlrun.ConsumerStatus
@@ -255,9 +255,8 @@ type distSQLReceiver struct {
 	rangeCache *kv.RangeDescriptorCache
 	leaseCache *kv.LeaseHolderCache

-	// The transaction in which the flow producing data for this receiver runs.
-	// The distSQLReceiver updates the TransactionProto in response to
-	// RetryableTxnError's. Nil if no transaction should be updated on errors
+	// The transaction in which the flow producing data for this
+	// receiver runs. Nil if no transaction should be updated on errors
 	// (i.e. if the flow overall doesn't run in a transaction).
 	txn *client.Txn

@@ -284,9 +283,9 @@ var _ distsqlrun.CancellableRowReceiver = &distSQLReceiver{}

 // makeDistSQLReceiver creates a distSQLReceiver.
 //
-// ctx is the Context that the receiver will use throughput its lifetime.
-// sink is the container where the results will be stored. If only the row count
-// is needed, this can be nil.
+// ctx is the Context that the receiver will use throughout its
+// lifetime. resultWriter is the container where the results will be
+// stored. If only the row count is needed, this can be nil.
 //
 // txn is the transaction in which the producer flow runs; it will be updated
 // on errors. Nil if the flow overall doesn't run in a transaction.
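
The hunks above replace the receiver's int32 cancellation flag with an
atomic.Value that carries the cancellation error itself, set either by
SetCanceled or by the root transaction's OnFinish hook and observed on the
next Push. The following is a minimal, self-contained Go sketch of that
pattern; the receiver, onFinish, and push names are illustrative stand-ins,
not the actual distSQLReceiver API.

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// receiver mimics the patched distSQLReceiver: canceled atomically holds
// the error that should terminate the flow, if any.
type receiver struct {
	canceled atomic.Value // holds an error once set
	err      error
}

// onFinish plays the role of the txn.OnFinish hook installed by
// makeDistSQLReceiver: it records why the root transaction finished.
func (r *receiver) onFinish(err error) {
	if err != nil {
		r.canceled.Store(err)
	}
}

// push checks the stored cancellation error before accepting a row,
// mirroring the check at the top of distSQLReceiver.Push.
func (r *receiver) push(row string) error {
	if r.err == nil {
		if c := r.canceled.Load(); c != nil {
			r.err = c.(error)
		}
	}
	if r.err != nil {
		return r.err // no more rows are accepted
	}
	fmt.Println("accepted:", row)
	return nil
}

func main() {
	r := &receiver{}
	_ = r.push("row 1")                   // accepted
	r.onFinish(errors.New("txn aborted")) // root transaction finished
	if err := r.push("row 2"); err != nil {
		fmt.Println("rejected:", err) // rejected: txn aborted
	}
}
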
@@ -298,7 +297,7 @@ func makeDistSQLReceiver( txn *client.Txn, updateClock func(observedTs hlc.Timestamp), ) distSQLReceiver { - return distSQLReceiver{ + r := distSQLReceiver{ ctx: ctx, resultWriter: resultWriter, rangeCache: rangeCache, @@ -306,6 +305,14 @@ func makeDistSQLReceiver( txn: txn, updateClock: updateClock, } + // Make sure that if the root transaction finishes (i.e. it is abandoned, + // aborted, or committed), we will cancel the dist SQL flow. + if r.txn != nil { + r.txn.OnFinish(func(err error) { + r.canceled.Store(err) + }) + } + return r } // Push is part of the RowReceiver interface. @@ -315,21 +322,8 @@ func (r *distSQLReceiver) Push( if meta != nil { if meta.Err != nil && r.err == nil { if r.txn != nil { - if retryErr, ok := meta.Err.(*roachpb.UnhandledRetryableError); ok { - // Update the txn in response to remote errors. In the non-DistSQL - // world, the TxnCoordSender does this, and the client.Txn updates - // itself in non-error cases. Those updates are not necessary if we're - // just doing reads. Once DistSQL starts performing writes, we'll need - // to perform such updates too. - r.txn.UpdateStateOnRemoteRetryableErr(r.ctx, retryErr.PErr) - // Update the clock with information from the error. On non-DistSQL - // code paths, the DistSender does this. - // TODO(andrei): We don't propagate clock signals on success cases - // through DistSQL; we should. We also don't propagate them through - // non-retryable errors; we also should. - r.updateClock(retryErr.PErr.Now) - meta.Err = roachpb.NewHandledRetryableTxnError( - meta.Err.Error(), r.txn.Proto().ID, *r.txn.Proto()) + if retryErr, ok := meta.Err.(*roachpb.HandledRetryableTxnError); ok { + r.txn.UpdateStateOnRemoteRetryableErr(r.ctx, retryErr) } } r.err = meta.Err @@ -347,11 +341,18 @@ func (r *distSQLReceiver) Push( r.err = errors.Errorf("error ingesting remote spans: %s", err) } } + if meta.TxnMeta != nil { + if r.txn != nil { + r.txn.AugmentTxnCoordMeta(*meta.TxnMeta) + } else { + r.err = errors.Errorf("received a leaf TxnCoordMeta (%s); but have no root", meta.TxnMeta) + } + } return r.status } - if r.err == nil && atomic.LoadInt32(&r.canceled) == 1 { + if r.err == nil && r.canceled.Load() != nil { // Set the error to reflect query cancellation. - r.err = sqlbase.NewQueryCanceledError() + r.err = r.canceled.Load().(error) } if r.err != nil { // TODO(andrei): We should drain here. @@ -392,6 +393,9 @@ func (r *distSQLReceiver) Push( // ProducerDone is part of the RowReceiver interface. func (r *distSQLReceiver) ProducerDone() { + if r.txn != nil { + r.txn.OnFinish(nil) + } if r.closed { panic("double close") } @@ -400,7 +404,7 @@ func (r *distSQLReceiver) ProducerDone() { // SetCanceled is part of the CancellableRowReceiver interface. 
func (r *distSQLReceiver) SetCanceled() {
-	atomic.StoreInt32(&r.canceled, 1)
+	r.canceled.Store(sqlbase.NewQueryCanceledError())
}

// updateCaches takes information about some ranges that were mis-planned and
diff --git a/pkg/sql/distsqlplan/aggregator_funcs_test.go b/pkg/sql/distsqlplan/aggregator_funcs_test.go
index 099a8df9f6b5..2bcbecb9a741 100644
--- a/pkg/sql/distsqlplan/aggregator_funcs_test.go
+++ b/pkg/sql/distsqlplan/aggregator_funcs_test.go
@@ -86,6 +86,9 @@ func runTestFlow(
 	for {
 		row, meta := rowBuf.Next()
 		if meta != nil {
+			if meta.TxnMeta != nil {
+				continue
+			}
 			t.Fatalf("unexpected metadata: %v", meta)
 		}
 		if row == nil {
@@ -146,7 +149,7 @@ func checkDistAggregationInfo(
 		}
 	}

-	txn := client.NewTxn(srv.KVClient().(*client.DB), srv.NodeID())
+	txn := client.NewTxn(srv.DB(), srv.NodeID(), client.RootTxn)

 	// First run a flow that aggregates all the rows without any local stages.

@@ -388,7 +391,7 @@ func TestDistAggregationTable(t *testing.T) {
 		},
 	)

-	kvDB := tc.Server(0).KVClient().(*client.DB)
+	kvDB := tc.Server(0).DB()
 	desc := sqlbase.GetTableDescriptor(kvDB, "test", "t")

 	for fn, info := range DistAggregationTable {
diff --git a/pkg/sql/distsqlplan/fake_span_resolver_test.go b/pkg/sql/distsqlplan/fake_span_resolver_test.go
index b06a3c17e90e..174415aef63c 100644
--- a/pkg/sql/distsqlplan/fake_span_resolver_test.go
+++ b/pkg/sql/distsqlplan/fake_span_resolver_test.go
@@ -51,9 +51,9 @@ func TestFakeSpanResolver(t *testing.T) {

 	resolver := distsqlutils.FakeResolverForTestCluster(tc)

-	db := tc.Server(0).KVClient().(*client.DB)
+	db := tc.Server(0).DB()

-	txn := client.NewTxn(db, tc.Server(0).NodeID())
+	txn := client.NewTxn(db, tc.Server(0).NodeID(), client.RootTxn)
 	it := resolver.NewSpanResolverIterator(txn)
 	tableDesc := sqlbase.GetTableDescriptor(db, "test", "t")
diff --git a/pkg/sql/distsqlplan/span_resolver_test.go b/pkg/sql/distsqlplan/span_resolver_test.go
index 9c6c49f93e3e..2bf7054295f6 100644
--- a/pkg/sql/distsqlplan/span_resolver_test.go
+++ b/pkg/sql/distsqlplan/span_resolver_test.go
@@ -52,7 +52,7 @@ func TestSpanResolverUsesCaches(t *testing.T) {
 	defer tc.Stopper().Stop(context.TODO())

 	rowRanges, _ := setupRanges(
-		tc.Conns[0], tc.Servers[0], tc.Servers[0].KVClient().(*client.DB), t)
+		tc.Conns[0], tc.Servers[0], tc.Servers[0].DB(), t)

 	// Replicate the row ranges on all of the first 3 nodes. Save the 4th node in
 	// a pristine state, with empty caches.
diff --git a/pkg/sql/distsqlrun/base.go b/pkg/sql/distsqlrun/base.go
index eb4d8406a6ce..e220559ee3f9 100644
--- a/pkg/sql/distsqlrun/base.go
+++ b/pkg/sql/distsqlrun/base.go
@@ -312,13 +312,17 @@ type RowChannelMsg struct {
 // ProducerMetadata represents a metadata record flowing through a DistSQL flow.
 type ProducerMetadata struct {
 	// Only one of these fields will be set. If this ever changes, note that
-	// there's consumers out there that extract the error and, if there is one,
+	// there are consumers out there that extract the error and, if there is one,
 	// forward it in isolation and drop the rest of the record.
 	Ranges []roachpb.RangeInfo
 	// TODO(vivek): change to type Error
 	Err error
 	// TraceData is sent if snowball tracing is enabled.
 	TraceData []tracing.RecordedSpan
+	// TxnMeta contains the updated transaction coordinator metadata,
+	// to be sent from leaf transactions to augment the root transaction,
+	// held by the flow's ultimate receiver.
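+	// Receivers that hold the root transaction (e.g. distSQLReceiver)
+	// merge this metadata into it via Txn.AugmentTxnCoordMeta.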
+ TxnMeta *roachpb.TxnCoordMeta } // RowChannel is a thin layer over a RowChannelMsg channel, which can be used to diff --git a/pkg/sql/distsqlrun/cluster_test.go b/pkg/sql/distsqlrun/cluster_test.go index acbed1765199..24535334f866 100644 --- a/pkg/sql/distsqlrun/cluster_test.go +++ b/pkg/sql/distsqlrun/cluster_test.go @@ -23,7 +23,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -63,7 +62,7 @@ func TestClusterFlow(t *testing.T) { numRows, sqlutils.ToRowFn(sqlutils.RowIdxFn, sumDigitsFn, sqlutils.RowEnglishFn)) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() desc := sqlbase.GetTableDescriptor(kvDB, "test", "t") makeIndexSpan := func(start, end int) TableReaderSpan { var span roachpb.Span @@ -253,6 +252,7 @@ func TestClusterFlow(t *testing.T) { rows, metas = testGetDecodedRows(t, &decoder, rows, metas) } metas = ignoreMisplannedRanges(metas) + metas = ignoreTxnMeta(metas) if len(metas) != 0 { t.Fatalf("unexpected metadata (%d): %+v", len(metas), metas) } @@ -285,6 +285,18 @@ func ignoreMisplannedRanges(metas []ProducerMetadata) []ProducerMetadata { return res } +// ignoreTxnMeta takes a slice of metadata and returns the entries excluding +// the transaction coordinator metadata. +func ignoreTxnMeta(metas []ProducerMetadata) []ProducerMetadata { + res := make([]ProducerMetadata, 0) + for _, m := range metas { + if m.TxnMeta == nil { + res = append(res, m) + } + } + return res +} + // TestLimitedBufferingDeadlock sets up a scenario which leads to deadlock if // a single consumer can block the entire router (#17097). 
func TestLimitedBufferingDeadlock(t *testing.T) { @@ -492,6 +504,7 @@ func TestLimitedBufferingDeadlock(t *testing.T) { rows, metas = testGetDecodedRows(t, &decoder, rows, metas) } metas = ignoreMisplannedRanges(metas) + metas = ignoreTxnMeta(metas) if len(metas) != 0 { t.Errorf("unexpected metadata (%d): %+v", len(metas), metas) } @@ -734,6 +747,7 @@ func BenchmarkInfrastructure(b *testing.B) { rows, metas = testGetDecodedRows(b, &decoder, rows, metas) } metas = ignoreMisplannedRanges(metas) + metas = ignoreTxnMeta(metas) if len(metas) != 0 { b.Fatalf("unexpected metadata (%d): %+v", len(metas), metas) } diff --git a/pkg/sql/distsqlrun/data.pb.go b/pkg/sql/distsqlrun/data.pb.go index c671373146bf..1ab63439caa7 100644 --- a/pkg/sql/distsqlrun/data.pb.go +++ b/pkg/sql/distsqlrun/data.pb.go @@ -7,6 +7,7 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import cockroach_roachpb3 "github.com/cockroachdb/cockroach/pkg/roachpb" +import cockroach_roachpb1 "github.com/cockroachdb/cockroach/pkg/roachpb" import cockroach_roachpb2 "github.com/cockroachdb/cockroach/pkg/roachpb" import cockroach_pgerror "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" import cockroach_sql_sqlbase1 "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -523,6 +524,7 @@ type RemoteProducerMetadata struct { // *RemoteProducerMetadata_RangeInfo // *RemoteProducerMetadata_Error // *RemoteProducerMetadata_TraceData_ + // *RemoteProducerMetadata_TxnMeta Value isRemoteProducerMetadata_Value `protobuf_oneof:"value"` } @@ -546,10 +548,14 @@ type RemoteProducerMetadata_Error struct { type RemoteProducerMetadata_TraceData_ struct { TraceData *RemoteProducerMetadata_TraceData `protobuf:"bytes,3,opt,name=trace_data,json=traceData,oneof"` } +type RemoteProducerMetadata_TxnMeta struct { + TxnMeta *cockroach_roachpb1.TxnCoordMeta `protobuf:"bytes,4,opt,name=txn_meta,json=txnMeta,oneof"` +} func (*RemoteProducerMetadata_RangeInfo) isRemoteProducerMetadata_Value() {} func (*RemoteProducerMetadata_Error) isRemoteProducerMetadata_Value() {} func (*RemoteProducerMetadata_TraceData_) isRemoteProducerMetadata_Value() {} +func (*RemoteProducerMetadata_TxnMeta) isRemoteProducerMetadata_Value() {} func (m *RemoteProducerMetadata) GetValue() isRemoteProducerMetadata_Value { if m != nil { @@ -579,12 +585,20 @@ func (m *RemoteProducerMetadata) GetTraceData() *RemoteProducerMetadata_TraceDat return nil } +func (m *RemoteProducerMetadata) GetTxnMeta() *cockroach_roachpb1.TxnCoordMeta { + if x, ok := m.GetValue().(*RemoteProducerMetadata_TxnMeta); ok { + return x.TxnMeta + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
func (*RemoteProducerMetadata) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _RemoteProducerMetadata_OneofMarshaler, _RemoteProducerMetadata_OneofUnmarshaler, _RemoteProducerMetadata_OneofSizer, []interface{}{ (*RemoteProducerMetadata_RangeInfo)(nil), (*RemoteProducerMetadata_Error)(nil), (*RemoteProducerMetadata_TraceData_)(nil), + (*RemoteProducerMetadata_TxnMeta)(nil), } } @@ -607,6 +621,11 @@ func _RemoteProducerMetadata_OneofMarshaler(msg proto.Message, b *proto.Buffer) if err := b.EncodeMessage(x.TraceData); err != nil { return err } + case *RemoteProducerMetadata_TxnMeta: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TxnMeta); err != nil { + return err + } case nil: default: return fmt.Errorf("RemoteProducerMetadata.Value has unexpected type %T", x) @@ -641,6 +660,14 @@ func _RemoteProducerMetadata_OneofUnmarshaler(msg proto.Message, tag, wire int, err := b.DecodeMessage(msg) m.Value = &RemoteProducerMetadata_TraceData_{msg} return true, err + case 4: // value.txn_meta + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(cockroach_roachpb1.TxnCoordMeta) + err := b.DecodeMessage(msg) + m.Value = &RemoteProducerMetadata_TxnMeta{msg} + return true, err default: return false, nil } @@ -665,6 +692,11 @@ func _RemoteProducerMetadata_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s + case *RemoteProducerMetadata_TxnMeta: + s := proto.Size(x.TxnMeta) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -1388,6 +1420,20 @@ func (m *RemoteProducerMetadata_TraceData_) MarshalTo(dAtA []byte) (int, error) } return i, nil } +func (m *RemoteProducerMetadata_TxnMeta) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.TxnMeta != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintData(dAtA, i, uint64(m.TxnMeta.Size())) + n14, err := m.TxnMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} func (m *RemoteProducerMetadata_RangeInfos) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1723,6 +1769,15 @@ func (m *RemoteProducerMetadata_TraceData_) Size() (n int) { } return n } +func (m *RemoteProducerMetadata_TxnMeta) Size() (n int) { + var l int + _ = l + if m.TxnMeta != nil { + l = m.TxnMeta.Size() + n += 1 + l + sovData(uint64(l)) + } + return n +} func (m *RemoteProducerMetadata_RangeInfos) Size() (n int) { var l int _ = l @@ -3598,6 +3653,38 @@ func (m *RemoteProducerMetadata) Unmarshal(dAtA []byte) error { } m.Value = &RemoteProducerMetadata_TraceData_{v} iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxnMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthData + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &cockroach_roachpb1.TxnCoordMeta{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = 
&RemoteProducerMetadata_TxnMeta{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipData(dAtA[iNdEx:]) @@ -3977,97 +4064,99 @@ var ( func init() { proto.RegisterFile("sql/distsqlrun/data.proto", fileDescriptorData) } var fileDescriptorData = []byte{ - // 1465 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4d, 0x6f, 0x1b, 0x41, - 0x19, 0xf6, 0xc6, 0xdf, 0xaf, 0x9d, 0xc4, 0x19, 0x55, 0x95, 0x31, 0xc1, 0x4e, 0x97, 0x82, 0x42, - 0x55, 0xd9, 0x6d, 0x38, 0x20, 0xd2, 0x43, 0x6b, 0xc7, 0x4e, 0x6c, 0x48, 0xe3, 0xb0, 0x4e, 0x41, - 0x6d, 0x91, 0x96, 0xcd, 0xee, 0xc4, 0x59, 0xb1, 0xde, 0xdd, 0xcc, 0xcc, 0x36, 0xc9, 0x85, 0x13, - 0x3f, 0xa0, 0x27, 0xc4, 0xb1, 0x27, 0x2e, 0xfc, 0x00, 0xfe, 0x01, 0xca, 0x91, 0x63, 0x05, 0x52, - 0x04, 0xe1, 0xc2, 0x1f, 0xe0, 0x40, 0x4f, 0x68, 0x66, 0x67, 0xfc, 0x91, 0xd4, 0x6d, 0xd3, 0x5e, - 0xac, 0x9d, 0x77, 0xde, 0xe7, 0x99, 0xe7, 0x7d, 0x66, 0xf6, 0xdd, 0x31, 0x7c, 0x87, 0x9e, 0x78, - 0x0d, 0xc7, 0xa5, 0x8c, 0x9e, 0x78, 0x24, 0xf2, 0x1b, 0x8e, 0xc5, 0xac, 0x7a, 0x48, 0x02, 0x16, - 0xa0, 0xb2, 0x1d, 0xd8, 0xbf, 0x25, 0x81, 0x65, 0x1f, 0xd7, 0xe9, 0x89, 0x57, 0x9f, 0x24, 0x55, - 0x56, 0x44, 0x34, 0x3c, 0x6c, 0x58, 0xa1, 0x1b, 0x27, 0x57, 0x90, 0x0a, 0x4d, 0x08, 0x2a, 0x77, - 0x54, 0x0c, 0x13, 0x12, 0x10, 0x2a, 0xa3, 0x35, 0xbe, 0x62, 0x38, 0x3c, 0x75, 0x09, 0x6e, 0x84, - 0x43, 0x31, 0x35, 0x9b, 0xb0, 0xca, 0x13, 0xe8, 0x89, 0x77, 0x68, 0x51, 0xdc, 0xa0, 0x8c, 0x44, - 0x36, 0x8b, 0x08, 0x76, 0xa6, 0xe1, 0x6a, 0x16, 0xfb, 0x76, 0xe0, 0x60, 0xc7, 0x74, 0x2c, 0x16, - 0x8d, 0x64, 0xc2, 0x5a, 0xc4, 0x5c, 0xaf, 0xc1, 0x88, 0x65, 0xbb, 0xfe, 0xb0, 0x41, 0xb0, 0x1d, - 0x10, 0x9e, 0x42, 0x43, 0xcb, 0x57, 0xba, 0x86, 0xc1, 0x30, 0x10, 0x8f, 0x0d, 0xfe, 0x14, 0x47, - 0xf5, 0xbf, 0x68, 0x90, 0xee, 0x70, 0x1d, 0xa8, 0x05, 0xb9, 0x70, 0x68, 0x0a, 0x4d, 0x65, 0x6d, - 0x4d, 0x5b, 0x2f, 0x6c, 0x94, 0xeb, 0x13, 0x2f, 0xa4, 0xe6, 0xba, 0xc8, 0x6d, 0x15, 0xae, 0x2e, - 0x6b, 0xd9, 0xfd, 0x1d, 0x31, 0xe8, 0x26, 0x8c, 0x6c, 0x38, 0x8c, 0x39, 0x5e, 0xc1, 0x0a, 0xc1, - 0x8c, 0x9c, 0x5b, 0x87, 0x1e, 0x3e, 0x38, 0xf3, 0x45, 0xb0, 0xbc, 0x20, 0xc8, 0x1e, 0x4c, 0x91, - 0x49, 0x87, 0xea, 0x2f, 0xfc, 0x63, 0xcb, 0x77, 0x3c, 0xec, 0x18, 0x0a, 0xa4, 0x18, 0x6f, 0xd2, - 0x6c, 0xa6, 0xfe, 0xf8, 0xae, 0x96, 0x68, 0xe5, 0x20, 0xe3, 0x60, 0x66, 0xb9, 0x9e, 0xbe, 0x0d, - 0xd0, 0x39, 0x0b, 0x09, 0xa6, 0xd4, 0x0d, 0x7c, 0x54, 0x85, 0xec, 0x1b, 0x4c, 0xf8, 0xa3, 0x10, - 0x9f, 0x6f, 0xa5, 0x2e, 0x2e, 0x6b, 0x09, 0x43, 0x05, 0x51, 0x19, 0x52, 0xf8, 0x2c, 0x8c, 0xc5, - 0xa8, 0x49, 0x11, 0xd1, 0x7f, 0xbf, 0x00, 0xb9, 0x3e, 0x71, 0x30, 0x71, 0xfd, 0x21, 0xea, 0x41, - 0xd6, 0x0e, 0xbc, 0x68, 0xe4, 0xd3, 0xb2, 0xb6, 0x96, 0x5c, 0x2f, 0x6c, 0xfc, 0xa8, 0x3e, 0xef, - 0x3c, 0xd4, 0x15, 0xa8, 0xbe, 0x25, 0x10, 0x6a, 0x45, 0x89, 0xaf, 0xfc, 0x49, 0x83, 0x4c, 0x3c, - 0x83, 0xbe, 0x27, 0x58, 0x4d, 0xd7, 0x39, 0x13, 0xe2, 0x16, 0x65, 0x6a, 0xc6, 0x0e, 0xbc, 0x9e, - 0x73, 0x86, 0x7e, 0x05, 0x79, 0xc7, 0x25, 0xd8, 0x66, 0x5c, 0x3d, 0x17, 0xb8, 0xb4, 0xf1, 0xe3, - 0x2f, 0x5e, 0xb6, 0xde, 0x56, 0x50, 0xc9, 0x3a, 0xe1, 0xd2, 0xab, 0x90, 0x1f, 0xcf, 0xa2, 0x2c, - 0x24, 0x9b, 0x83, 0xad, 0x52, 0x02, 0xe5, 0x20, 0xd5, 0xee, 0x0c, 0xb6, 0x4a, 0xda, 0x66, 0xea, - 0x3f, 0xef, 0x6a, 0xf2, 0x57, 0xff, 0xaf, 0x06, 0x68, 0xc0, 0x08, 0xb6, 0x46, 0x1d, 0xdf, 0x09, - 0x03, 0xd7, 0x67, 0x83, 0x10, 0xdb, 0xe8, 0xe7, 0x90, 0x62, 0xe7, 0x21, 0x16, 0xba, 0x97, 0x36, - 0x1e, 0xcf, 0x97, 0x75, 0x13, 0x5b, 0x3f, 0x38, 0x0f, 0xb1, 0xb2, 0x9a, 0x93, 0xa0, 0x9f, 0x42, - 
0x9e, 0x8a, 0x34, 0xd3, 0x75, 0x44, 0xa1, 0xe9, 0xd6, 0x2a, 0x9f, 0xbe, 0xba, 0xac, 0xe5, 0x62, - 0x7c, 0xaf, 0xfd, 0x61, 0xea, 0xd9, 0xc8, 0xc5, 0xe9, 0x3d, 0x07, 0xfd, 0x00, 0x0a, 0xcc, 0x22, - 0x43, 0xcc, 0x4c, 0xcb, 0x71, 0x48, 0x39, 0x39, 0xb5, 0x8d, 0x10, 0x4f, 0x34, 0x1d, 0x87, 0xe8, - 0x8f, 0x20, 0xc5, 0x57, 0x45, 0x79, 0x48, 0xef, 0xf6, 0xb7, 0x9a, 0xbb, 0xa5, 0x04, 0x02, 0xc8, - 0x18, 0x9d, 0xe7, 0xfd, 0x83, 0x4e, 0x49, 0x43, 0x2b, 0xb0, 0x38, 0x78, 0xb9, 0xb7, 0x65, 0x1a, - 0x9d, 0xc1, 0x7e, 0x7f, 0x6f, 0xd0, 0x29, 0x2d, 0xe8, 0xff, 0x58, 0x80, 0xc5, 0x9e, 0x1f, 0x46, - 0x6c, 0x70, 0xee, 0xdb, 0xa2, 0xe4, 0xed, 0x99, 0x92, 0x1f, 0xce, 0x2f, 0x79, 0x06, 0x76, 0xb3, - 0xda, 0x36, 0xe4, 0x02, 0xb9, 0x57, 0xf2, 0x1d, 0xd0, 0x3f, 0xbf, 0xab, 0x92, 0x61, 0x8c, 0x44, - 0xbb, 0x90, 0x8d, 0x4d, 0xa0, 0xe5, 0xa4, 0x38, 0x91, 0x0f, 0x6f, 0xb3, 0x07, 0xea, 0x50, 0x4a, - 0x0a, 0xf4, 0x33, 0x28, 0xc6, 0xe7, 0xd3, 0xe4, 0x12, 0x69, 0x39, 0x25, 0x28, 0xef, 0x5d, 0xa3, - 0x94, 0x8d, 0x46, 0x9e, 0xb0, 0xa9, 0xc2, 0x0a, 0xf6, 0x38, 0x42, 0x75, 0x5d, 0x7a, 0xbd, 0x08, - 0xf9, 0x17, 0x7b, 0x7d, 0xa3, 0xdd, 0x31, 0x3a, 0xed, 0x52, 0x02, 0x15, 0x20, 0xab, 0x06, 0x9a, - 0xfe, 0xbf, 0x0c, 0x94, 0xfa, 0x11, 0x0b, 0x23, 0x66, 0x04, 0x11, 0xc3, 0x44, 0x18, 0xdc, 0x9b, - 0x31, 0xb8, 0xf1, 0x09, 0x53, 0xae, 0x21, 0x6f, 0x7a, 0x3c, 0xe5, 0xce, 0xc2, 0xb7, 0xbb, 0x73, - 0x0f, 0x8a, 0xc7, 0x16, 0x3d, 0x36, 0x55, 0x0b, 0xe0, 0x86, 0x2f, 0x1a, 0x05, 0x1e, 0x8b, 0xad, - 0xa0, 0xc8, 0x83, 0x15, 0x62, 0xf9, 0x43, 0x6c, 0x12, 0xa1, 0xca, 0xa4, 0x21, 0xb6, 0xcb, 0x29, - 0xb1, 0xbb, 0x9b, 0xb7, 0x28, 0xc4, 0xe0, 0x1c, 0x93, 0xb1, 0x14, 0xb2, 0x4c, 0x66, 0xc3, 0xe8, - 0x31, 0xac, 0x38, 0x2e, 0xe5, 0x6d, 0xd0, 0x3c, 0x8c, 0x8e, 0x8e, 0xe2, 0xb3, 0x94, 0x5e, 0xd3, - 0xd6, 0x73, 0x12, 0x51, 0x92, 0xd3, 0x2d, 0x35, 0x5b, 0xf9, 0x6b, 0x12, 0x96, 0xaf, 0xb1, 0xa3, - 0xd7, 0x90, 0xe6, 0x1f, 0x02, 0xd5, 0xd3, 0x9e, 0x7e, 0xbd, 0xd0, 0xfa, 0x20, 0xb4, 0x54, 0xa3, - 0x89, 0x39, 0xb9, 0x69, 0x0e, 0x3e, 0xb2, 0x22, 0x8f, 0x99, 0x0e, 0xa6, 0x2c, 0x7e, 0xaf, 0x8d, - 0x82, 0x8c, 0xb5, 0x31, 0x65, 0x68, 0x04, 0x79, 0xf1, 0xcd, 0x72, 0xfd, 0xa1, 0x3a, 0xc5, 0xbd, - 0x6f, 0xd0, 0x10, 0xef, 0x45, 0x47, 0x32, 0xaa, 0xb6, 0x37, 0x5e, 0xa1, 0xf2, 0x06, 0x96, 0x66, - 0x53, 0xd0, 0x2a, 0x64, 0xe2, 0x3d, 0xbd, 0xd1, 0x7f, 0x79, 0x7b, 0xde, 0x86, 0x9c, 0x02, 0xcb, - 0xf6, 0x7b, 0x7f, 0xce, 0x0b, 0xd1, 0xe6, 0x5f, 0xdc, 0x6b, 0x0b, 0x8f, 0xb1, 0x95, 0x5d, 0x48, - 0x71, 0x7b, 0xd0, 0x1d, 0x48, 0x53, 0x66, 0x11, 0x26, 0x16, 0x2b, 0x1a, 0xf1, 0x00, 0x95, 0x20, - 0x89, 0xfd, 0xb8, 0xed, 0x15, 0x0d, 0xfe, 0xc8, 0x55, 0xc5, 0x27, 0x4f, 0xb4, 0xb3, 0xb4, 0x52, - 0x15, 0xc7, 0xf4, 0xa7, 0xf2, 0xf5, 0x2a, 0x41, 0x71, 0xbf, 0x39, 0x18, 0x98, 0x07, 0x5d, 0xa3, - 0xff, 0x62, 0xa7, 0x1b, 0x77, 0xb4, 0xe7, 0x3d, 0xc3, 0xe8, 0x1b, 0x25, 0x8d, 0xbf, 0x6d, 0xad, - 0x97, 0x66, 0xb7, 0x39, 0xe8, 0x96, 0x16, 0x50, 0x11, 0x72, 0xad, 0x97, 0xa6, 0xd1, 0xdc, 0xdb, - 0xe9, 0x94, 0x92, 0xfa, 0x5b, 0x0d, 0xf2, 0x42, 0x70, 0xcf, 0x3f, 0x0a, 0x66, 0x8a, 0xd4, 0xbe, - 0xbe, 0x48, 0xf4, 0x44, 0xbe, 0xbc, 0x71, 0x47, 0xfb, 0xe2, 0xce, 0x21, 0x40, 0xfa, 0xef, 0x60, - 0x69, 0x9f, 0x04, 0x4e, 0x64, 0x63, 0xd2, 0xc5, 0x96, 0x83, 0x09, 0x7a, 0x0c, 0xd9, 0x23, 0x2f, - 0x38, 0xe5, 0x1f, 0x04, 0xe1, 0x56, 0xab, 0xcc, 0xd3, 0xff, 0x7e, 0x59, 0xcb, 0x6c, 0x7b, 0xc1, - 0x69, 0xaf, 0x7d, 0x35, 0x7e, 0x32, 0x32, 0x3c, 0xb1, 0xe7, 0x7c, 0xc3, 0x57, 0x44, 0xff, 0xb3, - 0x06, 0x45, 0x25, 0xa0, 0x6d, 0x31, 0x0b, 0x7d, 0x17, 0xf2, 0xc4, 0x3a, 0x35, 0x0f, 0xcf, 0x19, - 0xa6, 0x72, 0xbb, 0x72, 
0xc4, 0x3a, 0x6d, 0xf1, 0x31, 0x32, 0x20, 0x37, 0xc2, 0xcc, 0xe2, 0x77, - 0x3b, 0xd9, 0x5d, 0x1e, 0xcd, 0x3f, 0xb5, 0x06, 0x1e, 0x05, 0x0c, 0x2b, 0xf2, 0xe7, 0x12, 0xa7, - 0xec, 0x53, 0x3c, 0xe8, 0x01, 0x2c, 0xf9, 0xd1, 0xc8, 0xc4, 0xa3, 0x90, 0x9d, 0x9b, 0x24, 0x38, - 0xa5, 0x33, 0x7b, 0x5f, 0xf4, 0xa3, 0x51, 0x87, 0x4f, 0x19, 0xc1, 0x29, 0xd5, 0xdf, 0x6b, 0xb0, - 0x3c, 0x21, 0xa4, 0xd4, 0x1a, 0x62, 0xf4, 0x0c, 0x32, 0xc7, 0xc2, 0x39, 0x79, 0x47, 0x5b, 0x9f, - 0xaf, 0x68, 0xd6, 0x69, 0x43, 0xe2, 0x50, 0x13, 0x32, 0xec, 0x3c, 0x8c, 0xcf, 0x3a, 0xaf, 0xe9, - 0xfb, 0xf3, 0x19, 0xc6, 0xa7, 0x47, 0x1d, 0xcd, 0x18, 0x88, 0x9e, 0x41, 0x4a, 0x98, 0x92, 0x14, - 0x12, 0x7e, 0xf8, 0x79, 0x09, 0xed, 0x89, 0x15, 0x02, 0xa9, 0x5f, 0x24, 0xe1, 0xee, 0xc7, 0x1d, - 0x43, 0xbf, 0x06, 0x88, 0x3b, 0xac, 0xeb, 0x1f, 0x05, 0xb2, 0xca, 0x27, 0xb7, 0xf5, 0x3d, 0xee, - 0x19, 0x5c, 0x3a, 0xed, 0x26, 0x8c, 0x3c, 0x51, 0x23, 0xf4, 0x13, 0x48, 0xe3, 0xa9, 0x5b, 0x69, - 0x6d, 0x3e, 0xb1, 0xba, 0x8a, 0xc6, 0xf9, 0xe8, 0x35, 0x00, 0xbf, 0x5d, 0x63, 0x73, 0xaa, 0xf2, - 0xcd, 0x5b, 0xcb, 0x3a, 0xe0, 0x14, 0xdc, 0x0d, 0xae, 0x8a, 0xa9, 0x41, 0xa5, 0x0f, 0x30, 0x11, - 0x8c, 0x9a, 0xd7, 0x1c, 0xe0, 0xbb, 0xb4, 0xfa, 0x91, 0xeb, 0xf3, 0x18, 0xa2, 0x5a, 0xe0, 0xb8, - 0xcc, 0xca, 0x6f, 0x20, 0x3f, 0x5e, 0x0a, 0x0d, 0x60, 0xd9, 0x0e, 0x3c, 0x0f, 0xdb, 0x4c, 0xfe, - 0x23, 0x50, 0x1f, 0x82, 0xe9, 0x0e, 0xc0, 0xff, 0x3f, 0xd4, 0xe5, 0xff, 0x87, 0xba, 0x21, 0xff, - 0x3f, 0x4c, 0x75, 0xfb, 0xa5, 0x31, 0x05, 0x0f, 0xd2, 0x56, 0x16, 0xd2, 0x6f, 0x2c, 0x2f, 0xc2, - 0xfa, 0x1f, 0x34, 0x28, 0xb7, 0x5d, 0xca, 0x06, 0xbf, 0xd8, 0xfd, 0x65, 0x7c, 0xd9, 0xde, 0x09, - 0x28, 0x75, 0x43, 0x61, 0xf7, 0xa3, 0xd9, 0x6b, 0xf9, 0x62, 0xeb, 0x2e, 0x27, 0xfb, 0x70, 0x59, - 0x5b, 0x9a, 0x85, 0x4c, 0x2e, 0xea, 0x5d, 0xb8, 0x33, 0x72, 0x7d, 0xd3, 0xb2, 0x6d, 0x1c, 0x72, - 0xbd, 0x0a, 0xbe, 0xf0, 0x49, 0x38, 0x1a, 0xb9, 0x7e, 0x53, 0x42, 0x64, 0xac, 0x75, 0xff, 0xe2, - 0x5f, 0xd5, 0xc4, 0xc5, 0x55, 0x55, 0xfb, 0xdb, 0x55, 0x55, 0x7b, 0x7f, 0x55, 0xd5, 0xfe, 0x79, - 0x55, 0xd5, 0xde, 0xfe, 0xbb, 0x9a, 0x78, 0x05, 0x93, 0x8d, 0xfa, 0x7f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x96, 0x92, 0x68, 0xce, 0x0b, 0x0e, 0x00, 0x00, + // 1496 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xcf, 0x6f, 0x1b, 0x4f, + 0x15, 0xf7, 0xc6, 0xbf, 0x9f, 0x9d, 0xc4, 0x19, 0x55, 0x5f, 0x19, 0x13, 0xec, 0x7c, 0x97, 0x2f, + 0x28, 0x54, 0x95, 0xdd, 0x86, 0x03, 0x22, 0x45, 0x6a, 0xed, 0xd8, 0x89, 0x0d, 0x69, 0x1c, 0xd6, + 0x29, 0xa8, 0x2d, 0xd2, 0xb2, 0xd9, 0x9d, 0x38, 0x2b, 0xd6, 0xbb, 0x9b, 0x99, 0xd9, 0xc6, 0xb9, + 0x70, 0x42, 0x9c, 0x7b, 0x42, 0x1c, 0x7b, 0xe2, 0xc2, 0x1f, 0xc0, 0x7f, 0x80, 0x7a, 0xe4, 0x58, + 0x81, 0x14, 0x41, 0xb8, 0xf0, 0x0f, 0x70, 0xa0, 0x27, 0x34, 0xb3, 0x33, 0xfe, 0x91, 0xd4, 0x6d, + 0xd3, 0x5e, 0xac, 0x99, 0x37, 0xef, 0x7d, 0xe6, 0xf3, 0x3e, 0xf3, 0xf6, 0xcd, 0x18, 0xbe, 0x45, + 0xcf, 0xbc, 0x86, 0xe3, 0x52, 0x46, 0xcf, 0x3c, 0x12, 0xf9, 0x0d, 0xc7, 0x62, 0x56, 0x3d, 0x24, + 0x01, 0x0b, 0x50, 0xd9, 0x0e, 0xec, 0xdf, 0x90, 0xc0, 0xb2, 0x4f, 0xeb, 0xf4, 0xcc, 0xab, 0x4f, + 0x9d, 0x2a, 0x6b, 0xc2, 0x1a, 0x1e, 0x37, 0xac, 0xd0, 0x8d, 0x9d, 0x2b, 0x48, 0x99, 0xa6, 0x00, + 0x95, 0x3b, 0xca, 0x86, 0x09, 0x09, 0x08, 0x95, 0xd6, 0x1a, 0xdf, 0x31, 0x1c, 0x9e, 0xbb, 0x04, + 0x37, 0xc2, 0xa1, 0x58, 0x9a, 0x77, 0x58, 0xe7, 0x0e, 0xf4, 0xcc, 0x3b, 0xb6, 0x28, 0x6e, 0x50, + 0x46, 0x22, 0x9b, 0x45, 0x04, 0x3b, 0xb3, 0xe1, 0x6a, 0x15, 0xfb, 0x76, 0xe0, 0x60, 0xc7, 0x74, + 0x2c, 0x16, 0x8d, 0xa4, 0xc3, 0x46, 0xc4, 
0x5c, 0xaf, 0xc1, 0x88, 0x65, 0xbb, 0xfe, 0xb0, 0x41, + 0xb0, 0x1d, 0x10, 0xee, 0x42, 0x43, 0xcb, 0x57, 0xbc, 0x86, 0xc1, 0x30, 0x10, 0xc3, 0x06, 0x1f, + 0xc5, 0x56, 0xfd, 0x2f, 0x1a, 0xa4, 0x3b, 0x9c, 0x07, 0x6a, 0x41, 0x2e, 0x1c, 0x9a, 0x82, 0x53, + 0x59, 0xdb, 0xd0, 0x36, 0x0b, 0x5b, 0xe5, 0xfa, 0x54, 0x0b, 0xc9, 0xb9, 0x2e, 0x7c, 0x5b, 0x85, + 0xab, 0xcb, 0x5a, 0xf6, 0x70, 0x4f, 0x4c, 0xba, 0x09, 0x23, 0x1b, 0x0e, 0x63, 0x8c, 0xe7, 0xb0, + 0x46, 0x30, 0x23, 0x17, 0xd6, 0xb1, 0x87, 0x8f, 0xc6, 0xbe, 0x30, 0x96, 0x97, 0x04, 0xd8, 0xdd, + 0x19, 0x30, 0xa9, 0x50, 0xfd, 0xa9, 0x7f, 0x6a, 0xf9, 0x8e, 0x87, 0x1d, 0x43, 0x05, 0x29, 0xc4, + 0x9b, 0x30, 0xdb, 0xa9, 0x3f, 0xbe, 0xae, 0x25, 0x5a, 0x39, 0xc8, 0x38, 0x98, 0x59, 0xae, 0xa7, + 0xef, 0x02, 0x74, 0xc6, 0x21, 0xc1, 0x94, 0xba, 0x81, 0x8f, 0xaa, 0x90, 0x7d, 0x89, 0x09, 0x1f, + 0x0a, 0xf2, 0xf9, 0x56, 0xea, 0xcd, 0x65, 0x2d, 0x61, 0x28, 0x23, 0x2a, 0x43, 0x0a, 0x8f, 0xc3, + 0x98, 0x8c, 0x5a, 0x14, 0x16, 0xfd, 0x77, 0x4b, 0x90, 0xeb, 0x13, 0x07, 0x13, 0xd7, 0x1f, 0xa2, + 0x1e, 0x64, 0xed, 0xc0, 0x8b, 0x46, 0x3e, 0x2d, 0x6b, 0x1b, 0xc9, 0xcd, 0xc2, 0xd6, 0x0f, 0xea, + 0x8b, 0xea, 0xa1, 0xae, 0x82, 0xea, 0x3b, 0x22, 0x42, 0xed, 0x28, 0xe3, 0x2b, 0x7f, 0xd2, 0x20, + 0x13, 0xaf, 0xa0, 0xef, 0x08, 0x54, 0xd3, 0x75, 0xc6, 0x82, 0xdc, 0xb2, 0x74, 0xcd, 0xd8, 0x81, + 0xd7, 0x73, 0xc6, 0xe8, 0x97, 0x90, 0x77, 0x5c, 0x82, 0x6d, 0xc6, 0xd9, 0x73, 0x82, 0x2b, 0x5b, + 0x3f, 0xfc, 0xe4, 0x6d, 0xeb, 0x6d, 0x15, 0x2a, 0x51, 0xa7, 0x58, 0x7a, 0x15, 0xf2, 0x93, 0x55, + 0x94, 0x85, 0x64, 0x73, 0xb0, 0x53, 0x4a, 0xa0, 0x1c, 0xa4, 0xda, 0x9d, 0xc1, 0x4e, 0x49, 0xdb, + 0x4e, 0xfd, 0xe7, 0x75, 0x4d, 0xfe, 0xea, 0xff, 0xd5, 0x00, 0x0d, 0x18, 0xc1, 0xd6, 0xa8, 0xe3, + 0x3b, 0x61, 0xe0, 0xfa, 0x6c, 0x10, 0x62, 0x1b, 0xfd, 0x0c, 0x52, 0xec, 0x22, 0xc4, 0x82, 0xf7, + 0xca, 0xd6, 0x83, 0xc5, 0xb4, 0x6e, 0xc6, 0xd6, 0x8f, 0x2e, 0x42, 0xac, 0xa4, 0xe6, 0x20, 0xe8, + 0xc7, 0x90, 0xa7, 0xc2, 0xcd, 0x74, 0x1d, 0x91, 0x68, 0xba, 0xb5, 0xce, 0x97, 0xaf, 0x2e, 0x6b, + 0xb9, 0x38, 0xbe, 0xd7, 0x7e, 0x37, 0x33, 0x36, 0x72, 0xb1, 0x7b, 0xcf, 0x41, 0xdf, 0x83, 0x02, + 0xb3, 0xc8, 0x10, 0x33, 0xd3, 0x72, 0x1c, 0x52, 0x4e, 0xce, 0x1c, 0x23, 0xc4, 0x0b, 0x4d, 0xc7, + 0x21, 0xfa, 0x7d, 0x48, 0xf1, 0x5d, 0x51, 0x1e, 0xd2, 0xfb, 0xfd, 0x9d, 0xe6, 0x7e, 0x29, 0x81, + 0x00, 0x32, 0x46, 0xe7, 0x49, 0xff, 0xa8, 0x53, 0xd2, 0xd0, 0x1a, 0x2c, 0x0f, 0x9e, 0x1d, 0xec, + 0x98, 0x46, 0x67, 0x70, 0xd8, 0x3f, 0x18, 0x74, 0x4a, 0x4b, 0xfa, 0x3f, 0x96, 0x60, 0xb9, 0xe7, + 0x87, 0x11, 0x1b, 0x5c, 0xf8, 0xb6, 0x48, 0x79, 0x77, 0x2e, 0xe5, 0x7b, 0x8b, 0x53, 0x9e, 0x0b, + 0xbb, 0x99, 0x6d, 0x1b, 0x72, 0x81, 0x3c, 0x2b, 0xf9, 0x0d, 0xe8, 0x1f, 0x3f, 0x55, 0x89, 0x30, + 0x89, 0x44, 0xfb, 0x90, 0x8d, 0x45, 0xa0, 0xe5, 0xa4, 0xa8, 0xc8, 0x7b, 0xb7, 0x39, 0x03, 0x55, + 0x94, 0x12, 0x02, 0xfd, 0x14, 0x8a, 0x71, 0x7d, 0x9a, 0x9c, 0x22, 0x2d, 0xa7, 0x04, 0xe4, 0xd7, + 0xd7, 0x20, 0x65, 0xa3, 0x91, 0x15, 0x36, 0x93, 0x58, 0xc1, 0x9e, 0x58, 0xa8, 0xae, 0x4b, 0xad, + 0x97, 0x21, 0xff, 0xf4, 0xa0, 0x6f, 0xb4, 0x3b, 0x46, 0xa7, 0x5d, 0x4a, 0xa0, 0x02, 0x64, 0xd5, + 0x44, 0xd3, 0xff, 0x97, 0x81, 0x52, 0x3f, 0x62, 0x61, 0xc4, 0x8c, 0x20, 0x62, 0x98, 0x08, 0x81, + 0x7b, 0x73, 0x02, 0x37, 0x3e, 0x20, 0xca, 0xb5, 0xc8, 0x9b, 0x1a, 0xcf, 0xa8, 0xb3, 0xf4, 0xe5, + 0xea, 0x7c, 0x0d, 0xc5, 0x53, 0x8b, 0x9e, 0x9a, 0xaa, 0x05, 0x70, 0xc1, 0x97, 0x8d, 0x02, 0xb7, + 0xc5, 0x52, 0x50, 0xe4, 0xc1, 0x1a, 0xb1, 0xfc, 0x21, 0x36, 0x89, 0x60, 0x65, 0xd2, 0x10, 0xdb, + 0xe5, 0x94, 0x38, 0xdd, 0xed, 0x5b, 0x24, 0x62, 0x70, 0x8c, 0xe9, 
0x5c, 0x12, 0x59, 0x25, 0xf3, + 0x66, 0xf4, 0x00, 0xd6, 0x1c, 0x97, 0xf2, 0x36, 0x68, 0x1e, 0x47, 0x27, 0x27, 0x71, 0x2d, 0xa5, + 0x37, 0xb4, 0xcd, 0x9c, 0x8c, 0x28, 0xc9, 0xe5, 0x96, 0x5a, 0xad, 0xfc, 0x35, 0x09, 0xab, 0xd7, + 0xd0, 0xd1, 0x0b, 0x48, 0xf3, 0x8b, 0x40, 0xf5, 0xb4, 0x47, 0x9f, 0x4f, 0xb4, 0x3e, 0x08, 0x2d, + 0xd5, 0x68, 0x62, 0x4c, 0x2e, 0x9a, 0x83, 0x4f, 0xac, 0xc8, 0x63, 0xa6, 0x83, 0x29, 0x8b, 0xbf, + 0x6b, 0xa3, 0x20, 0x6d, 0x6d, 0x4c, 0x19, 0x1a, 0x41, 0x5e, 0xdc, 0x59, 0xae, 0x3f, 0x54, 0x55, + 0xdc, 0xfb, 0x02, 0x0e, 0xf1, 0x59, 0x74, 0x24, 0xa2, 0x6a, 0x7b, 0x93, 0x1d, 0x2a, 0x2f, 0x61, + 0x65, 0xde, 0x05, 0xad, 0x43, 0x26, 0x3e, 0xd3, 0x1b, 0xfd, 0x97, 0xb7, 0xe7, 0x5d, 0xc8, 0xa9, + 0x60, 0xd9, 0x7e, 0xbf, 0x59, 0xf0, 0x41, 0xb4, 0xf9, 0x8d, 0x7b, 0x6d, 0xe3, 0x49, 0x6c, 0x65, + 0x1f, 0x52, 0x5c, 0x1e, 0x74, 0x07, 0xd2, 0x94, 0x59, 0x84, 0x89, 0xcd, 0x8a, 0x46, 0x3c, 0x41, + 0x25, 0x48, 0x62, 0x3f, 0x6e, 0x7b, 0x45, 0x83, 0x0f, 0x39, 0xab, 0xb8, 0xf2, 0x44, 0x3b, 0x4b, + 0x2b, 0x56, 0xb1, 0x4d, 0x7f, 0x24, 0x3f, 0xaf, 0x12, 0x14, 0x0f, 0x9b, 0x83, 0x81, 0x79, 0xd4, + 0x35, 0xfa, 0x4f, 0xf7, 0xba, 0x71, 0x47, 0x7b, 0xd2, 0x33, 0x8c, 0xbe, 0x51, 0xd2, 0xf8, 0xd7, + 0xd6, 0x7a, 0x66, 0x76, 0x9b, 0x83, 0x6e, 0x69, 0x09, 0x15, 0x21, 0xd7, 0x7a, 0x66, 0x1a, 0xcd, + 0x83, 0xbd, 0x4e, 0x29, 0xa9, 0xbf, 0xd2, 0x20, 0x2f, 0x08, 0xf7, 0xfc, 0x93, 0x60, 0x2e, 0x49, + 0xed, 0xf3, 0x93, 0x44, 0x0f, 0xe5, 0xc7, 0x1b, 0x77, 0xb4, 0x4f, 0xee, 0x1c, 0x22, 0x48, 0xff, + 0x2d, 0xac, 0x1c, 0x92, 0xc0, 0x89, 0x6c, 0x4c, 0xba, 0xd8, 0x72, 0x30, 0x41, 0x0f, 0x20, 0x7b, + 0xe2, 0x05, 0xe7, 0xfc, 0x42, 0x10, 0x6a, 0xb5, 0xca, 0xdc, 0xfd, 0xef, 0x97, 0xb5, 0xcc, 0xae, + 0x17, 0x9c, 0xf7, 0xda, 0x57, 0x93, 0x91, 0x91, 0xe1, 0x8e, 0x3d, 0xe7, 0x0b, 0x6e, 0x11, 0xfd, + 0xcf, 0x1a, 0x14, 0x15, 0x81, 0xb6, 0xc5, 0x2c, 0xf4, 0x6d, 0xc8, 0x13, 0xeb, 0xdc, 0x3c, 0xbe, + 0x60, 0x98, 0xca, 0xe3, 0xca, 0x11, 0xeb, 0xbc, 0xc5, 0xe7, 0xc8, 0x80, 0xdc, 0x08, 0x33, 0x8b, + 0xbf, 0xed, 0x64, 0x77, 0xb9, 0xbf, 0xb8, 0x6a, 0x0d, 0x3c, 0x0a, 0x18, 0x56, 0xe0, 0x4f, 0x64, + 0x9c, 0x92, 0x4f, 0xe1, 0xa0, 0xbb, 0xb0, 0xe2, 0x47, 0x23, 0x13, 0x8f, 0x42, 0x76, 0x61, 0x92, + 0xe0, 0x9c, 0xce, 0x9d, 0x7d, 0xd1, 0x8f, 0x46, 0x1d, 0xbe, 0x64, 0x04, 0xe7, 0x54, 0x7f, 0xab, + 0xc1, 0xea, 0x14, 0x90, 0x52, 0x6b, 0x88, 0xd1, 0x63, 0xc8, 0x9c, 0x0a, 0xe5, 0xe4, 0x1b, 0x6d, + 0x73, 0x31, 0xa3, 0x79, 0xa5, 0x0d, 0x19, 0x87, 0x9a, 0x90, 0x61, 0x17, 0x61, 0x5c, 0xeb, 0x3c, + 0xa7, 0xef, 0x2e, 0x46, 0x98, 0x54, 0x8f, 0x2a, 0xcd, 0x38, 0x10, 0x3d, 0x86, 0x94, 0x10, 0x25, + 0x29, 0x28, 0x7c, 0xff, 0xe3, 0x14, 0xda, 0x53, 0x29, 0x44, 0xa4, 0xfe, 0xfb, 0x14, 0x7c, 0xf5, + 0x7e, 0xc5, 0xd0, 0xaf, 0x00, 0xe2, 0x0e, 0xeb, 0xfa, 0x27, 0x81, 0xcc, 0xf2, 0xe1, 0x6d, 0x75, + 0x8f, 0x7b, 0x06, 0xa7, 0x4e, 0xbb, 0x09, 0x23, 0x4f, 0xd4, 0x0c, 0xfd, 0x08, 0xd2, 0x78, 0xe6, + 0x55, 0x5a, 0x5b, 0x0c, 0xac, 0x9e, 0xa2, 0xb1, 0x3f, 0x7a, 0x01, 0xc0, 0x5f, 0xd7, 0xd8, 0x9c, + 0xc9, 0x7c, 0xfb, 0xd6, 0xb4, 0x8e, 0x38, 0x04, 0x57, 0x83, 0xb3, 0x62, 0x6a, 0x82, 0x7e, 0x02, + 0x39, 0x36, 0xf6, 0x4d, 0x5e, 0x25, 0xf2, 0x32, 0xa9, 0xbd, 0xe7, 0xb9, 0x7c, 0x34, 0xf6, 0x77, + 0x82, 0x80, 0x38, 0x1c, 0x8d, 0xbf, 0xba, 0xd9, 0xd8, 0xe7, 0xc3, 0x4a, 0x1f, 0x60, 0x9a, 0x2e, + 0x6a, 0x5e, 0xd3, 0x8f, 0x9f, 0xf1, 0xfa, 0x7b, 0xd0, 0x26, 0x21, 0xaa, 0x81, 0x4e, 0x44, 0xaa, + 0xfc, 0x1a, 0xf2, 0x13, 0xa2, 0x68, 0x00, 0xab, 0x76, 0xe0, 0x79, 0xd8, 0x66, 0xf2, 0xff, 0x84, + 0xba, 0x46, 0x66, 0xfb, 0x07, 0xff, 0xf7, 0x51, 0x97, 0xff, 0x3e, 0xea, 0x86, 0xfc, 0xf7, 
0x31, + 0x73, 0x57, 0xac, 0x4c, 0x20, 0xb8, 0x91, 0xb6, 0xb2, 0x90, 0x7e, 0x69, 0x79, 0x11, 0xd6, 0xff, + 0xa0, 0x41, 0xb9, 0xed, 0x52, 0x36, 0xf8, 0xf9, 0xfe, 0x2f, 0xe2, 0xa7, 0xfa, 0x5e, 0x40, 0xa9, + 0x1b, 0x8a, 0xc3, 0xba, 0x3f, 0xff, 0xa8, 0x5f, 0x6e, 0x7d, 0xc5, 0xc1, 0xde, 0x5d, 0xd6, 0x56, + 0xe6, 0x43, 0xa6, 0xcf, 0xfc, 0x2e, 0xdc, 0x19, 0xb9, 0xbe, 0x69, 0xd9, 0x36, 0x0e, 0x39, 0x5f, + 0x15, 0xbe, 0xf4, 0xc1, 0x70, 0x34, 0x72, 0xfd, 0xa6, 0x0c, 0x91, 0xb6, 0xd6, 0x37, 0x6f, 0xfe, + 0x55, 0x4d, 0xbc, 0xb9, 0xaa, 0x6a, 0x7f, 0xbb, 0xaa, 0x6a, 0x6f, 0xaf, 0xaa, 0xda, 0x3f, 0xaf, + 0xaa, 0xda, 0xab, 0x7f, 0x57, 0x13, 0xcf, 0x61, 0x7a, 0xcc, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, + 0x8c, 0xeb, 0x6c, 0xcb, 0x49, 0x0e, 0x00, 0x00, } diff --git a/pkg/sql/distsqlrun/data.proto b/pkg/sql/distsqlrun/data.proto index eeafabca2f06..a0501b77f431 100644 --- a/pkg/sql/distsqlrun/data.proto +++ b/pkg/sql/distsqlrun/data.proto @@ -242,6 +242,7 @@ message RemoteProducerMetadata { RangeInfos range_info = 1; Error error = 2; TraceData trace_data = 3; + roachpb.TxnCoordMeta txn_meta = 4; } } diff --git a/pkg/sql/distsqlrun/interleaved_reader_joiner_test.go b/pkg/sql/distsqlrun/interleaved_reader_joiner_test.go index c8fa7b68d8a4..2c30d7187d07 100644 --- a/pkg/sql/distsqlrun/interleaved_reader_joiner_test.go +++ b/pkg/sql/distsqlrun/interleaved_reader_joiner_test.go @@ -399,9 +399,8 @@ func TestInterleavedReaderJoiner(t *testing.T) { Ctx: context.Background(), EvalCtx: evalCtx, Settings: s.ClusterSettings(), - // Pass a DB without a TxnCoordSender. - txn: client.NewTxn(client.NewDB(s.DistSender(), s.Clock()), s.NodeID()), - nodeID: s.NodeID(), + txn: client.NewTxn(s.DB(), s.NodeID(), client.RootTxn), + nodeID: s.NodeID(), } out := &RowBuffer{} @@ -528,9 +527,8 @@ func TestInterleavedReaderJoinerErrors(t *testing.T) { flowCtx := FlowCtx{ EvalCtx: evalCtx, Settings: s.ClusterSettings(), - // Pass a DB without a TxnCoordSender. - txn: client.NewTxn(client.NewDB(s.DistSender(), s.Clock()), s.NodeID()), - nodeID: s.NodeID(), + txn: client.NewTxn(s.DB(), s.NodeID(), client.RootTxn), + nodeID: s.NodeID(), } out := &RowBuffer{} diff --git a/pkg/sql/distsqlrun/joinreader_test.go b/pkg/sql/distsqlrun/joinreader_test.go index fb9aa54238a1..d9482cce296a 100644 --- a/pkg/sql/distsqlrun/joinreader_test.go +++ b/pkg/sql/distsqlrun/joinreader_test.go @@ -106,8 +106,7 @@ func TestJoinReader(t *testing.T) { Ctx: context.Background(), EvalCtx: evalCtx, Settings: cluster.MakeTestingClusterSettings(), - // Pass a DB without a TxnCoordSender. - txn: client.NewTxn(client.NewDB(s.DistSender(), s.Clock()), s.NodeID()), + txn: client.NewTxn(s.DB(), s.NodeID(), client.RootTxn), } encRows := make(sqlbase.EncDatumRows, len(c.input)) @@ -175,8 +174,7 @@ func TestJoinReaderDrain(t *testing.T) { Ctx: context.Background(), EvalCtx: evalCtx, Settings: s.ClusterSettings(), - // Pass a DB without a TxnCoordSender. - txn: client.NewTxn(client.NewDB(s.DistSender(), s.Clock()), s.NodeID()), + txn: client.NewTxn(s.DB(), s.NodeID(), client.RootTxn), } encRow := make(sqlbase.EncDatumRow, 1) diff --git a/pkg/sql/distsqlrun/server.go b/pkg/sql/distsqlrun/server.go index 57f032f4af5f..2c0648c47359 100644 --- a/pkg/sql/distsqlrun/server.go +++ b/pkg/sql/distsqlrun/server.go @@ -248,11 +248,9 @@ func (ds *ServerImpl) setupFlow( monitor.Start(ctx, &ds.memMonitor, mon.BoundAccount{}) acc := monitor.MakeBoundAccount() - // The flow will run in a Txn that bypasses the local TxnCoordSender. 
- txn := client.NewTxnWithProto(ds.FlowDB, req.Flow.Gateway, req.Txn) - // DistSQL transactions get retryable errors that would otherwise be handled - // by the TxnCoordSender. - txn.AcceptUnhandledRetryableErrors() + // The flow will run in a Txn that specifies child=true because we + // do not want each distributed Txn to heartbeat the transaction. + txn := client.NewTxnWithProto(ds.FlowDB, req.Flow.Gateway, client.LeafTxn, req.Txn) location, err := timeutil.TimeZoneStringToLocation(req.EvalContext.Location) if err != nil { diff --git a/pkg/sql/distsqlrun/server_test.go b/pkg/sql/distsqlrun/server_test.go index 8b35786dea42..1ece4c53f338 100644 --- a/pkg/sql/distsqlrun/server_test.go +++ b/pkg/sql/distsqlrun/server_test.go @@ -60,7 +60,7 @@ func TestServer(t *testing.T) { OutputColumns: []uint32{0, 1}, // a } - txn := client.NewTxn(kvDB, s.NodeID()) + txn := client.NewTxn(kvDB, s.NodeID(), client.RootTxn) req := &SetupFlowRequest{Version: Version, Txn: *txn.Proto()} req.Flow = FlowSpec{ @@ -100,6 +100,7 @@ func TestServer(t *testing.T) { } rows, metas = testGetDecodedRows(t, &decoder, rows, metas) } + metas = ignoreTxnMeta(metas) if len(metas) != 0 { t.Errorf("unexpected metadata: %v", metas) } diff --git a/pkg/sql/distsqlrun/stream_decoder.go b/pkg/sql/distsqlrun/stream_decoder.go index 3719629790bd..d1b67b7198cf 100644 --- a/pkg/sql/distsqlrun/stream_decoder.go +++ b/pkg/sql/distsqlrun/stream_decoder.go @@ -108,6 +108,9 @@ func (sd *StreamDecoder) AddMessage(msg *ProducerMessage) error { case *RemoteProducerMetadata_TraceData_: meta.TraceData = v.TraceData.CollectedSpans + case *RemoteProducerMetadata_TxnMeta: + meta.TxnMeta = v.TxnMeta + case *RemoteProducerMetadata_Error: meta.Err = v.Error.ErrorDetail() diff --git a/pkg/sql/distsqlrun/stream_encoder.go b/pkg/sql/distsqlrun/stream_encoder.go index 455943c870ee..04cff0a530b8 100644 --- a/pkg/sql/distsqlrun/stream_encoder.go +++ b/pkg/sql/distsqlrun/stream_encoder.go @@ -91,6 +91,10 @@ func (se *StreamEncoder) AddMetadata(meta ProducerMetadata) { CollectedSpans: meta.TraceData, }, } + } else if meta.TxnMeta != nil { + enc.Value = &RemoteProducerMetadata_TxnMeta{ + TxnMeta: meta.TxnMeta, + } } else { enc.Value = &RemoteProducerMetadata_Error{ Error: NewError(meta.Err), diff --git a/pkg/sql/distsqlrun/tablereader.go b/pkg/sql/distsqlrun/tablereader.go index 2eb0ebf2f78c..3d6cc18e4b92 100644 --- a/pkg/sql/distsqlrun/tablereader.go +++ b/pkg/sql/distsqlrun/tablereader.go @@ -291,6 +291,8 @@ func (tr *tableReader) producerMeta(err error) *ProducerMetadata { if traceData != nil { tr.trailingMetadata = append(tr.trailingMetadata, ProducerMetadata{TraceData: traceData}) } + txnMeta := tr.flowCtx.txn.GetTxnCoordMeta() + tr.trailingMetadata = append(tr.trailingMetadata, ProducerMetadata{TxnMeta: &txnMeta}) tr.close() } if len(tr.trailingMetadata) > 0 { diff --git a/pkg/sql/distsqlrun/tablereader_test.go b/pkg/sql/distsqlrun/tablereader_test.go index 6dfe02c35df9..969d443f3b1b 100644 --- a/pkg/sql/distsqlrun/tablereader_test.go +++ b/pkg/sql/distsqlrun/tablereader_test.go @@ -126,9 +126,8 @@ func TestTableReader(t *testing.T) { Ctx: context.Background(), EvalCtx: evalCtx, Settings: s.ClusterSettings(), - // Pass a DB without a TxnCoordSender. 
- txn: client.NewTxn(client.NewDB(s.DistSender(), s.Clock()), s.NodeID()), - nodeID: s.NodeID(), + txn: client.NewTxn(s.DB(), s.NodeID(), client.RootTxn), + nodeID: s.NodeID(), } tr, err := newTableReader(&flowCtx, &ts, &c.post, nil /* output */) @@ -152,7 +151,9 @@ func TestTableReader(t *testing.T) { for { row, meta := out.Next() if meta != nil { - t.Fatalf("unexpected metadata: %+v", meta) + if meta.TxnMeta == nil { + t.Fatalf("unexpected metadata: %+v", meta) + } } if row == nil { break @@ -195,7 +196,7 @@ ALTER TABLE t TESTING_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[3], 3 t.Fatal(err) } - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() td := sqlbase.GetTableDescriptor(kvDB, "test", "t") evalCtx := tree.MakeTestingEvalContext() @@ -205,9 +206,8 @@ ALTER TABLE t TESTING_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[3], 3 Ctx: context.Background(), EvalCtx: evalCtx, Settings: tc.Server(0).ClusterSettings(), - // Pass a DB without a TxnCoordSender. - txn: client.NewTxn(client.NewDB(tc.Server(0).DistSender(), tc.Server(0).Clock()), nodeID), - nodeID: nodeID, + txn: client.NewTxn(tc.Server(0).DB(), nodeID, client.RootTxn), + nodeID: nodeID, } spec := TableReaderSpec{ Spans: []TableReaderSpan{{Span: td.PrimaryIndexSpan()}}, @@ -254,10 +254,14 @@ ALTER TABLE t TESTING_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[3], 3 if len(res) != 3 { t.Fatalf("expected 3 rows, got: %d", len(res)) } - if len(metas) != 1 { - t.Fatalf("expected one meta with misplanned ranges, got: %+v", metas) + var misplannedRanges []roachpb.RangeInfo + for _, m := range metas { + if len(m.Ranges) > 0 { + misplannedRanges = m.Ranges + } else if m.TxnMeta == nil { + t.Fatalf("expected only txn meta or misplanned ranges, got: %+v", metas) + } } - misplannedRanges := metas[0].Ranges if len(misplannedRanges) != 2 { t.Fatalf("expected 2 misplanned ranges, got: %+v", misplannedRanges) } @@ -294,9 +298,8 @@ func BenchmarkTableReader(b *testing.B) { Ctx: context.Background(), EvalCtx: evalCtx, Settings: s.ClusterSettings(), - // Pass a DB without a TxnCoordSender. - txn: client.NewTxn(client.NewDB(s.DistSender(), s.Clock()), s.NodeID()), - nodeID: s.NodeID(), + txn: client.NewTxn(s.DB(), s.NodeID(), client.RootTxn), + nodeID: s.NodeID(), } spec := TableReaderSpec{ Table: *tableDesc, diff --git a/pkg/sql/executor.go b/pkg/sql/executor.go index 12a3beb61031..b96bd5a2db23 100644 --- a/pkg/sql/executor.go +++ b/pkg/sql/executor.go @@ -515,7 +515,7 @@ func (e *Executor) Prepare( // TODO(vivek): perhaps we should be more consistent and update // session.TxnState.mu.txn, but more thought needs to be put into whether that // is really needed. - txn = client.NewTxn(e.cfg.DB, e.cfg.NodeID.Get()) + txn = client.NewTxn(e.cfg.DB, e.cfg.NodeID.Get(), client.RootTxn) if err := txn.SetIsolation(session.data.DefaultIsolationLevel); err != nil { panic(fmt.Errorf("cannot set up txn for prepare %q: %v", stmtStr, err)) } @@ -1743,7 +1743,7 @@ func (e *Executor) execStmtInOpenTxn( // We want to disallow SAVEPOINTs to be issued after a transaction has // started running. The client txn's statement count indicates how many // statements have been executed as part of this transaction. 
- if txnState.mu.txn.CommandCount() > 0 { + if meta := txnState.mu.txn.GetTxnCoordMeta(); meta.CommandCount > 0 { return errors.Errorf("SAVEPOINT %s needs to be the first statement in a "+ "transaction", tree.RestartSavepointName) } @@ -1771,7 +1771,7 @@ func (e *Executor) execStmtInOpenTxn( txnState.SetState(AutoRetry) // If commands have already been sent through the transaction, // restart the client txn's proto to increment the epoch. - if txnState.mu.txn.CommandCount() > 0 { + if meta := txnState.mu.txn.GetTxnCoordMeta(); meta.CommandCount > 0 { // TODO(andrei): Should the timestamp below be e.cfg.Clock.Now(), so that // the transaction gets a new timestamp? txnState.mu.txn.Proto().Restart( @@ -2130,23 +2130,10 @@ func shouldUseDistSQL( return false, nil } - var err error - var distribute bool - - // Temporary workaround for #13376: if the transaction wrote something, - // we can't allow it to do DistSQL reads any more because we can't guarantee - // that the reads don't happen after the gateway's TxnCoordSender has - // abandoned the transaction (and so the reads could miss to see their own - // writes). We detect this by checking if the transaction's "anchor" key is - // set. - if planner.txn.AnchorKey() != nil { - err = errors.New("writing txn") - } else { - // Trigger limit propagation. - planner.setUnlimited(plan) - distribute, err = dp.CheckSupport(plan) - } + // Trigger limit propagation. + planner.setUnlimited(plan) + distribute, err := dp.CheckSupport(plan) if err != nil { // If the distSQLMode is ALWAYS, reject anything but SET. if distSQLMode == sessiondata.DistSQLAlways && err != setNotSupportedError { diff --git a/pkg/sql/executor_opt_interface.go b/pkg/sql/executor_opt_interface.go index 0fb271eefca5..c65edc9d3365 100644 --- a/pkg/sql/executor_opt_interface.go +++ b/pkg/sql/executor_opt_interface.go @@ -26,7 +26,7 @@ import ( // NewExecFactory is used from opt tests to create and execute plans. 
func (e *Executor) NewExecFactory() opt.ExecFactory { - txn := client.NewTxn(e.cfg.DB, e.cfg.NodeID.Get()) + txn := client.NewTxn(e.cfg.DB, e.cfg.NodeID.Get(), client.RootTxn) return &execFactory{ planner: makeInternalPlanner("opt", txn, "root", &MemoryMetrics{}), } diff --git a/pkg/sql/jobs/registry_external_test.go b/pkg/sql/jobs/registry_external_test.go index a09f155e24e6..a16cbd1049f8 100644 --- a/pkg/sql/jobs/registry_external_test.go +++ b/pkg/sql/jobs/registry_external_test.go @@ -23,7 +23,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" @@ -76,7 +75,7 @@ func TestRegistryResumeExpiredLease(t *testing.T) { s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - db := s.KVClient().(*client.DB) + db := s.DB() ex := sql.InternalExecutor{LeaseManager: s.LeaseManager().(*sql.LeaseManager)} gossip := s.Gossip() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) diff --git a/pkg/sql/parallel_stmts_test.go b/pkg/sql/parallel_stmts_test.go index b9a54bd600c8..fc8886987eb3 100644 --- a/pkg/sql/parallel_stmts_test.go +++ b/pkg/sql/parallel_stmts_test.go @@ -314,10 +314,10 @@ func TestParallelizeQueueAddAfterError(t *testing.T) { func planQuery( t *testing.T, s serverutils.TestServerInterface, sql string, ) (*planner, planNode, func()) { - kvDB := s.KVClient().(*client.DB) + kvDB := s.DB() now := s.Clock().Now() physicalNow := s.Clock().PhysicalTime() - txn := client.NewTxn(kvDB, s.NodeID()) + txn := client.NewTxn(kvDB, s.NodeID(), client.RootTxn) txn.Proto().OrigTimestamp = now p := makeInternalPlanner("plan", txn, security.RootUser, &MemoryMetrics{}) p.session.tables.leaseMgr = s.LeaseManager().(*LeaseManager) @@ -365,7 +365,7 @@ func TestSpanBasedDependencyAnalyzer(t *testing.T) { CREATE TABLE bar ( k INT PRIMARY KEY DEFAULT 0, v INT DEFAULT 1, - a INT, + a INT, UNIQUE INDEX idx(v) ) `); err != nil { diff --git a/pkg/sql/schema_changer_test.go b/pkg/sql/schema_changer_test.go index 06e78c354a0f..6f9c7d231ec0 100644 --- a/pkg/sql/schema_changer_test.go +++ b/pkg/sql/schema_changer_test.go @@ -606,7 +606,7 @@ func TestRaceWithBackfill(t *testing.T) { ServerArgs: params, }) defer tc.Stopper().Stop(context.TODO()) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() sqlDB := tc.ServerConn(0) jobRegistry := tc.Server(0).JobRegistry().(*jobs.Registry) @@ -773,7 +773,7 @@ func TestDropWhileBackfill(t *testing.T) { ServerArgs: params, }) defer tc.Stopper().Stop(context.TODO()) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() sqlDB := tc.ServerConn(0) if _, err := sqlDB.Exec(` @@ -871,7 +871,7 @@ func TestBackfillErrors(t *testing.T) { ServerArgs: params, }) defer tc.Stopper().Stop(context.TODO()) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() sqlDB := tc.ServerConn(0) if _, err := sqlDB.Exec(` @@ -2100,7 +2100,7 @@ func TestBackfillCompletesOnChunkBoundary(t *testing.T) { ServerArgs: params, }) defer tc.Stopper().Stop(context.TODO()) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() sqlDB := tc.ServerConn(0) if _, err := sqlDB.Exec(` @@ -2373,7 +2373,7 @@ func TestSchemaChangeEvalContext(t *testing.T) { ServerArgs: params, }) defer tc.Stopper().Stop(context.TODO()) - kvDB := tc.Server(0).KVClient().(*client.DB) + kvDB := tc.Server(0).DB() sqlDB := 
tc.ServerConn(0) if _, err := sqlDB.Exec(` diff --git a/pkg/sql/session.go b/pkg/sql/session.go index caa7ffec09cf..c099b6cd238e 100644 --- a/pkg/sql/session.go +++ b/pkg/sql/session.go @@ -1089,7 +1089,7 @@ func (ts *txnState) resetForNewSQLTxn( ts.mon.Start(ctx, &s.mon, mon.BoundAccount{}) ts.mu.Lock() - ts.mu.txn = client.NewTxn(e.cfg.DB, e.cfg.NodeID.Get()) + ts.mu.txn = client.NewTxn(e.cfg.DB, e.cfg.NodeID.Get(), client.RootTxn) ts.mu.Unlock() if ts.implicitTxn { ts.mu.txn.SetDebugName(sqlImplicitTxnName) diff --git a/pkg/sql/sqlbase/multirowfetcher_test.go b/pkg/sql/sqlbase/multirowfetcher_test.go index 4dca0e100de4..04ff1bb52a19 100644 --- a/pkg/sql/sqlbase/multirowfetcher_test.go +++ b/pkg/sql/sqlbase/multirowfetcher_test.go @@ -158,7 +158,7 @@ func TestNextRowSingle(t *testing.T) { if err := mrf.StartScan( context.TODO(), - client.NewTxn(kvDB, 0), + client.NewTxn(kvDB, 0, client.RootTxn), roachpb.Spans{tableDesc.TableSpan()}, false, /*limitBatches*/ 0, /*limitHint*/ @@ -332,7 +332,7 @@ func TestNextRowSecondaryIndex(t *testing.T) { if err := mrf.StartScan( context.TODO(), - client.NewTxn(kvDB, 0), + client.NewTxn(kvDB, 0, client.RootTxn), roachpb.Spans{tableDesc.TableSpan()}, false, /*limitBatches*/ 0, /*limitHint*/ @@ -692,7 +692,7 @@ func TestNextRowInterleaved(t *testing.T) { if err := mrf.StartScan( context.TODO(), - client.NewTxn(kvDB, 0), + client.NewTxn(kvDB, 0, client.RootTxn), lookupSpans, false, /*limitBatches*/ 0, /*limitHint*/ diff --git a/pkg/sql/sqlbase/structured_test.go b/pkg/sql/sqlbase/structured_test.go index 2ea5ab8a1df5..cd6bf7824e3c 100644 --- a/pkg/sql/sqlbase/structured_test.go +++ b/pkg/sql/sqlbase/structured_test.go @@ -843,7 +843,7 @@ func TestValidateCrossTableReferences(t *testing.T) { t.Fatal(err) } } - txn := client.NewTxn(kvDB, s.NodeID()) + txn := client.NewTxn(kvDB, s.NodeID(), client.RootTxn) if err := test.desc.validateCrossReferences(context.TODO(), txn); err == nil { t.Errorf("%d: expected \"%s\", but found success: %+v", i, test.err, test.desc) } else if test.err != err.Error() { diff --git a/pkg/storage/addressing_test.go b/pkg/storage/addressing_test.go index f818bc843f5c..27c4cc80f783 100644 --- a/pkg/storage/addressing_test.go +++ b/pkg/storage/addressing_test.go @@ -141,12 +141,13 @@ func TestUpdateRangeAddressing(t *testing.T) { // interface without sending through a TxnCoordSender (which initializes a // transaction id). Also, we need the TxnCoordSender to clean up the // intents, otherwise the MVCCScan that the test does below fails. 
- tcs := kv.NewTxnCoordSender(log.AmbientContext{Tracer: st.Tracer}, - st, + tcsf := kv.NewTxnCoordSenderFactory( + log.AmbientContext{Tracer: st.Tracer}, st, store.testSender(), store.cfg.Clock, - false, stopper, kv.MakeTxnMetrics(time.Second)) - db := client.NewDB(tcs, store.cfg.Clock) - txn := client.NewTxn(db, 0 /* gatewayNodeID */) + false, stopper, kv.MakeTxnMetrics(time.Second), + ) + db := client.NewDB(tcsf, store.cfg.Clock) + txn := client.NewTxn(db, 0 /* gatewayNodeID */, client.RootTxn) ctx := context.Background() if err := txn.Run(ctx, b); err != nil { t.Fatal(err) diff --git a/pkg/storage/client_replica_test.go b/pkg/storage/client_replica_test.go index 6b4f7ef446ff..ce25f019dbdd 100644 --- a/pkg/storage/client_replica_test.go +++ b/pkg/storage/client_replica_test.go @@ -989,7 +989,7 @@ func TestLeaseExtensionNotBlockedByRead(t *testing.T) { Key: key, }, } - if _, pErr := client.SendWrappedWith(context.Background(), s.DistSender(), + if _, pErr := client.SendWrappedWith(context.Background(), s.DB().GetSender(), roachpb.Header{UserPriority: 42}, &getReq); pErr != nil { errChan <- pErr.GoError() @@ -1026,7 +1026,7 @@ func TestLeaseExtensionNotBlockedByRead(t *testing.T) { }, PrevLease: curLease, } - if _, pErr := client.SendWrapped(context.Background(), s.DistSender(), &leaseReq); pErr != nil { + if _, pErr := client.SendWrapped(context.Background(), s.DB().GetSender(), &leaseReq); pErr != nil { t.Fatal(pErr) } // Unblock the read. @@ -1178,7 +1178,7 @@ func TestErrorHandlingForNonKVCommand(t *testing.T) { } _, pErr := client.SendWrappedWith( context.Background(), - s.DistSender(), + s.DB().GetSender(), roachpb.Header{UserPriority: 42}, &leaseReq, ) diff --git a/pkg/storage/client_split_test.go b/pkg/storage/client_split_test.go index a708d6675e02..a75531d89d61 100644 --- a/pkg/storage/client_split_test.go +++ b/pkg/storage/client_split_test.go @@ -1466,7 +1466,7 @@ func TestStoreSplitTimestampCacheDifferentLeaseHolder(t *testing.T) { ctx = tc.Server(0).Stopper().WithCancel(ctx) // This transaction will try to write "under" a served read. - txnOld := client.NewTxn(db, 0 /* gatewayNodeID */) + txnOld := client.NewTxn(db, 0 /* gatewayNodeID */, client.RootTxn) // Do something with txnOld so that its timestamp gets set. if _, err := txnOld.Scan(ctx, "a", "b", 0); err != nil { @@ -2158,7 +2158,7 @@ func TestDistributedTxnCleanup(t *testing.T) { // Run a distributed transaction involving the lhsKey and rhsKey. var txnKey roachpb.Key ctx := context.Background() - txn := client.NewTxn(store.DB(), 0 /* gatewayNodeID */) + txn := client.NewTxn(store.DB(), 0 /* gatewayNodeID */, client.RootTxn) opts := client.TxnExecOptions{ AutoCommit: true, AutoRetry: false, @@ -2517,13 +2517,13 @@ func TestRangeLookupAfterMeta2Split(t *testing.T) { // will require a scan that continues into the next meta2 range. 
const tableID = keys.MaxReservedDescID + 2 // 51 splitReq := adminSplitArgs(keys.MakeTablePrefix(tableID - 3 /* 48 */)) - if _, pErr := client.SendWrapped(ctx, s.DistSender(), splitReq); pErr != nil { + if _, pErr := client.SendWrapped(ctx, s.DB().GetSender(), splitReq); pErr != nil { t.Fatal(pErr) } metaKey := keys.RangeMetaKey(keys.MakeTablePrefix(tableID)).AsRawKey() splitReq = adminSplitArgs(metaKey) - if _, pErr := client.SendWrapped(ctx, s.DistSender(), splitReq); pErr != nil { + if _, pErr := client.SendWrapped(ctx, s.DB().GetSender(), splitReq); pErr != nil { t.Fatal(pErr) } @@ -2549,7 +2549,7 @@ func TestRangeLookupAfterMeta2Split(t *testing.T) { } else { lookupReq = &roachpb.ScanRequest{Span: span} } - if _, err := client.SendWrapped(ctx, s.DistSender(), lookupReq); err != nil { + if _, err := client.SendWrapped(ctx, s.DB().GetSender(), lookupReq); err != nil { t.Fatalf("%T %v", err.GoError(), err) } }) diff --git a/pkg/storage/client_test.go b/pkg/storage/client_test.go index fbe812d787f5..28d04240afc8 100644 --- a/pkg/storage/client_test.go +++ b/pkg/storage/client_test.go @@ -140,7 +140,7 @@ func createTestStoreWithEngine( RPCRetryOptions: &retryOpts, }, storeCfg.Gossip) - sender := kv.NewTxnCoordSender( + tcsFactory := kv.NewTxnCoordSenderFactory( ac, storeCfg.Settings, distSender, @@ -149,7 +149,7 @@ func createTestStoreWithEngine( stopper, kv.MakeTxnMetrics(metric.TestSampleInterval), ) - storeCfg.DB = client.NewDB(sender, storeCfg.Clock) + storeCfg.DB = client.NewDB(tcsFactory, storeCfg.Clock) storeCfg.StorePool = storage.NewTestStorePool(storeCfg) storeCfg.Transport = storage.NewDummyRaftTransport(storeCfg.Settings) // TODO(bdarnell): arrange to have the transport closed. @@ -658,7 +658,7 @@ func (m *multiTestContext) populateDB(idx int, stopper *stop.Stopper) { }, RPCRetryOptions: &retryOpts, }, m.gossips[idx]) - sender := kv.NewTxnCoordSender( + tcsFactory := kv.NewTxnCoordSenderFactory( ambient, m.storeConfig.Settings, m.distSenders[idx], @@ -667,7 +667,7 @@ func (m *multiTestContext) populateDB(idx int, stopper *stop.Stopper) { stopper, kv.MakeTxnMetrics(metric.TestSampleInterval), ) - m.dbs[idx] = client.NewDB(sender, m.clock) + m.dbs[idx] = client.NewDB(tcsFactory, m.clock) } func (m *multiTestContext) populateStorePool(idx int, nodeLiveness *storage.NodeLiveness) { diff --git a/pkg/storage/gc_queue.go b/pkg/storage/gc_queue.go index 1bb1805e1629..c016a944f507 100644 --- a/pkg/storage/gc_queue.go +++ b/pkg/storage/gc_queue.go @@ -59,11 +59,7 @@ const ( // abortSpanAgeThreshold is the duration after which AbortSpan entries // of transactions are garbage collected. - // It's important that this is kept aligned with the (maximum) heartbeat - // interval used by transaction coordinators throughout the cluster to make - // sure that no coordinator can run with a transaction which has been - // aborted and whose AbortSpan entry is being deleted. - abortSpanAgeThreshold = 5 * base.DefaultHeartbeatInterval + abortSpanAgeThreshold = txnCleanupThreshold // Thresholds used to decide whether to queue for GC based // on keys and intents. 
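Taken together, the hunks above move every transaction constructor to the three-argument form that declares each txn a root or a leaf: roots (created on the gateway) heartbeat the transaction record and commit, while leaves (rebuilt on remote nodes from the root's proto, as in the server.go hunk above) only read and then ship their coordinator state back as trailing metadata. A minimal sketch of that round trip, using only the API visible in this patch, is below; the AugmentTxnCoordMeta method on *client.Txn is an assumption made for illustration, mirroring the TxnSender.AugmentMeta hook added in the store_test.go hunk further down.

// Sketch only; not part of this patch. Error handling is minimal and
// the scan bounds are placeholders.
package sketch

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/internal/client"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

func rootLeafRoundTrip(ctx context.Context, db *client.DB, gateway roachpb.NodeID) error {
	// The gateway owns the root txn; it is the only coordinator that
	// heartbeats the transaction record.
	root := client.NewTxn(db, gateway, client.RootTxn)

	// A remote flow reconstructs a leaf from the root's serialized
	// proto (cf. the client.NewTxnWithProto call in server.go above).
	leaf := client.NewTxnWithProto(db, gateway, client.LeafTxn, *root.Proto())

	// Leaf reads run as part of the same transaction.
	if _, err := leaf.Scan(ctx, "a", "b", 0); err != nil {
		return err
	}

	// The leaf's coordinator state travels back as ProducerMetadata
	// (cf. tablereader.go above) and is merged into the root; this
	// method name is assumed, not shown in the diff.
	root.AugmentTxnCoordMeta(leaf.GetTxnCoordMeta())

	return root.Commit(ctx)
}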
diff --git a/pkg/storage/gossip_test.go b/pkg/storage/gossip_test.go index e1ee64c1129d..97222048ed01 100644 --- a/pkg/storage/gossip_test.go +++ b/pkg/storage/gossip_test.go @@ -23,7 +23,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" @@ -188,7 +187,7 @@ func TestGossipHandlesReplacedNode(t *testing.T) { if i == oldNodeIdx { continue } - kvClient := server.KVClient().(*client.DB) + kvClient := server.DB() if err := kvClient.Put(ctx, fmt.Sprintf("%d", i), i); err != nil { t.Errorf("failed Put to node %d: %s", i, err) } diff --git a/pkg/storage/idalloc/id_alloc_test.go b/pkg/storage/idalloc/id_alloc_test.go index 9eb1a0a4064a..92f71cd613d5 100644 --- a/pkg/storage/idalloc/id_alloc_test.go +++ b/pkg/storage/idalloc/id_alloc_test.go @@ -42,7 +42,7 @@ import ( // by calling Stop. func newTestAllocator(t testing.TB) (*localtestcluster.LocalTestCluster, *idalloc.Allocator) { s := &localtestcluster.LocalTestCluster{} - s.Start(t, testutils.NewNodeTestBaseContext(), kv.InitSenderForLocalTestCluster) + s.Start(t, testutils.NewNodeTestBaseContext(), kv.InitFactoryForLocalTestCluster) idAlloc, err := idalloc.NewAllocator( s.Cfg.AmbientCtx, keys.RangeIDGenerator, diff --git a/pkg/storage/replica_test.go b/pkg/storage/replica_test.go index 38dbe8e6e915..b1f334c05322 100644 --- a/pkg/storage/replica_test.go +++ b/pkg/storage/replica_test.go @@ -183,8 +183,8 @@ func (tc *testContext) StartWithStoreConfig(t testing.TB, stopper *stop.Stopper, // Create a test sender without setting a store. This is to deal with the // circular dependency between the test sender and the store. The actual // store will be passed to the sender after it is created and bootstrapped. - sender := &testSender{} - cfg.DB = client.NewDB(sender, cfg.Clock) + factory := &testSenderFactory{} + cfg.DB = client.NewDB(factory, cfg.Clock) tc.store = NewStore(cfg, tc.engine, &roachpb.NodeDescriptor{NodeID: 1}) if err := tc.store.Bootstrap(ctx, roachpb.StoreIdent{ ClusterID: uuid.MakeV4(), @@ -193,8 +193,8 @@ func (tc *testContext) StartWithStoreConfig(t testing.TB, stopper *stop.Stopper, }, cfg.Settings.Version.BootstrapVersion()); err != nil { t.Fatal(err) } - // Now that we have our actual store, monkey patch the sender used in cfg.DB. - sender.store = tc.store + // Now that we have our actual store, monkey patch the factory used in cfg.DB. + factory.store = tc.store // We created the store without a real KV client, so it can't perform splits. tc.store.splitQueue.SetDisabled(true) @@ -8757,7 +8757,7 @@ func TestErrorInRaftApplicationClearsIntents(t *testing.T) { var ba roachpb.BatchRequest ba.Header.Txn = txn ba.Add(&btArgs) - if _, pErr := s.DistSender().Send(context.TODO(), ba); pErr != nil { + if _, pErr := s.DB().GetSender().Send(context.TODO(), ba); pErr != nil { t.Fatal(pErr.GoError()) } diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go index 4d9398310295..461242bcd0a8 100644 --- a/pkg/storage/store_test.go +++ b/pkg/storage/store_test.go @@ -61,12 +61,6 @@ var testIdent = roachpb.StoreIdent{ StoreID: 1, } -// testSender is an implementation of the client.Sender interface -// which passes all requests through to a single store. 
-type testSender struct { - store *Store -} - func (s *Store) testSender() client.Sender { return client.Wrap(s, func(ba roachpb.BatchRequest) roachpb.BatchRequest { if ba.RangeID == 0 { @@ -76,6 +70,28 @@ func (s *Store) testSender() client.Sender { }) } +// testSenderFactory is an implementation of the +// client.TxnSenderFactory interface. +type testSenderFactory struct { + store *Store +} + +func (f *testSenderFactory) New(typ client.TxnType) client.TxnSender { + return &testSender{store: f.store} +} + +// testSender is an implementation of the client.TxnSender interface +// which passes all requests through to a single store. +type testSender struct { + store *Store +} + +func (db *testSender) GetMeta() roachpb.TxnCoordMeta { panic("unimplemented") } + +func (db *testSender) AugmentMeta(roachpb.TxnCoordMeta) { panic("unimplemented") } + +func (db *testSender) OnFinish(func(error)) { panic("unimplemented") } + // Send forwards the call to the single store. This is a poor man's // version of kv.TxnCoordSender, but it serves the purposes of // supporting tests in this package. Transactions are not supported. @@ -141,10 +157,10 @@ func createTestStoreWithoutStart(t testing.TB, stopper *stop.Stopper, cfg *Store eng := engine.NewInMem(roachpb.Attributes{}, 10<<20) stopper.AddCloser(eng) cfg.Transport = NewDummyRaftTransport(cfg.Settings) - sender := &testSender{} - cfg.DB = client.NewDB(sender, cfg.Clock) + factory := &testSenderFactory{} + cfg.DB = client.NewDB(factory, cfg.Clock) store := NewStore(*cfg, eng, &roachpb.NodeDescriptor{NodeID: 1}) - sender.store = store + factory.store = store if err := store.Bootstrap( context.TODO(), roachpb.StoreIdent{NodeID: 1, StoreID: 1}, cfg.Settings.Version.BootstrapVersion(), ); err != nil { diff --git a/pkg/testutils/localtestcluster/local_test_cluster.go b/pkg/testutils/localtestcluster/local_test_cluster.go index dc086e839739..20f07b43d534 100644 --- a/pkg/testutils/localtestcluster/local_test_cluster.go +++ b/pkg/testutils/localtestcluster/local_test_cluster.go @@ -42,7 +42,7 @@ import ( // // s := &LocalTestCluster{} // s.Start(t, testutils.NewNodeTestBaseContext(), -// kv.InitSenderForLocalTestCluster) +// kv.InitFactoryForLocalTestCluster) // defer s.Stop() // // Note that the LocalTestCluster is different from server.TestCluster @@ -58,16 +58,16 @@ type LocalTestCluster struct { DBContext *client.DBContext DB *client.DB Stores *storage.Stores - Sender client.Sender Stopper *stop.Stopper Latency time.Duration // sleep for each RPC sent tester testing.TB DontRetryPushTxnFailures bool } -// InitSenderFn is a callback used to initiate the txn coordinator (we don't -// do it directly from this package to avoid a dependency on kv). -type InitSenderFn func( +// InitFactoryFn is a callback used to initiate the txn coordinator +// sender factory (we don't do it directly from this package to avoid +// a dependency on kv). +type InitFactoryFn func( st *cluster.Settings, nodeDesc *roachpb.NodeDescriptor, tracer opentracing.Tracer, @@ -76,14 +76,14 @@ type InitSenderFn func( stores client.Sender, stopper *stop.Stopper, gossip *gossip.Gossip, -) client.Sender +) client.TxnSenderFactory // Start starts the test cluster by bootstrapping an in-memory store // (defaults to maximum of 50M). The server is started, launching the // node RPC server and all HTTP endpoints. Use the value of // TestServer.Addr after Start() for client connections. Use Stop() // to shutdown the server after the test completes. 
-func (ltc *LocalTestCluster) Start(t testing.TB, baseCtx *base.Config, initSender InitSenderFn) { +func (ltc *LocalTestCluster) Start(t testing.TB, baseCtx *base.Config, initFactory InitFactoryFn) { ltc.Manual = hlc.NewManualClock(123) ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano, 50*time.Millisecond) cfg := storage.TestStoreConfig(ltc.Clock) @@ -106,13 +106,12 @@ func (ltc *LocalTestCluster) Start(t testing.TB, baseCtx *base.Config, initSende ltc.Stores = storage.NewStores(ambient, ltc.Clock, cfg.Settings.Version.MinSupportedVersion, cfg.Settings.Version.ServerVersion) - ltc.Sender = initSender(cfg.Settings, nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper, - ltc.Gossip) + factory := initFactory(cfg.Settings, nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper, ltc.Gossip) if ltc.DBContext == nil { dbCtx := client.DefaultDBContext() ltc.DBContext = &dbCtx } - ltc.DB = client.NewDBWithContext(ltc.Sender, ltc.Clock, *ltc.DBContext) + ltc.DB = client.NewDBWithContext(factory, ltc.Clock, *ltc.DBContext) transport := storage.NewDummyRaftTransport(cfg.Settings) // By default, disable the replica scanner and split queue, which // confuse tests using LocalTestCluster. diff --git a/pkg/testutils/serverutils/test_server_shim.go b/pkg/testutils/serverutils/test_server_shim.go index db77b370e743..00b873a938a6 100644 --- a/pkg/testutils/serverutils/test_server_shim.go +++ b/pkg/testutils/serverutils/test_server_shim.go @@ -61,9 +61,8 @@ type TestServerInterface interface { // Addr returns the server's address. Addr() string - // KVClient() returns a *client.DB instance for talking to this KV server, - // as an interface{}. - KVClient() interface{} + // DB returns a *client.DB instance for talking to this KV server. + DB() *client.DB // RPCContext returns the rpc context used by the test server. RPCContext() *rpc.Context @@ -175,7 +174,6 @@ func StartServer( t.Fatal(err) } - kvClient := server.KVClient().(*client.DB) pgURL, cleanupGoDB := sqlutils.PGUrl( t, server.ServingAddr(), "StartServer", url.User(security.RootUser)) pgURL.Path = params.UseDatabase @@ -188,7 +186,7 @@ func StartServer( _ = goDB.Close() cleanupGoDB() })) - return server, goDB, kvClient + return server, goDB, server.DB() } // StartServerRaw creates and starts a TestServer. diff --git a/pkg/ts/db_test.go b/pkg/ts/db_test.go index 2ffefcbbf81d..ec9301acd5a0 100644 --- a/pkg/ts/db_test.go +++ b/pkg/ts/db_test.go @@ -76,7 +76,7 @@ func newTestModel(t *testing.T) testModel { // time series DB. func (tm *testModel) Start() { tm.LocalTestCluster.Start(tm.t, testutils.NewNodeTestBaseContext(), - kv.InitSenderForLocalTestCluster) + kv.InitFactoryForLocalTestCluster) tm.DB = NewDB(tm.LocalTestCluster.DB, tm.Cfg.Settings) }
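For reference, the client.TxnSenderFactory shape that these test hunks code against can be sketched as below. It is modeled directly on testSenderFactory in store_test.go above; the Send signature is the standard client.Sender one, and the rest follows the method set this diff exercises (New, GetMeta, AugmentMeta, OnFinish).

// Sketch only; not part of this patch.
package sketch

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/internal/client"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// passThroughFactory hands out senders that forward every batch to a
// single underlying client.Sender, as testSenderFactory does above.
type passThroughFactory struct {
	wrapped client.Sender
}

func (f *passThroughFactory) New(typ client.TxnType) client.TxnSender {
	// typ (client.RootTxn or client.LeafTxn) lets a real factory hand
	// back different coordinators; this sketch ignores the distinction.
	return &passThroughSender{wrapped: f.wrapped}
}

// passThroughSender implements client.TxnSender by delegation.
type passThroughSender struct {
	wrapped client.Sender
}

func (s *passThroughSender) Send(
	ctx context.Context, ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, *roachpb.Error) {
	return s.wrapped.Send(ctx, ba)
}

// The three hooks below are the root/leaf state exchange added by this
// change: a real TxnCoordSender reports intents and the command count
// through GetMeta and folds a leaf's state in through AugmentMeta.
func (s *passThroughSender) GetMeta() roachpb.TxnCoordMeta    { panic("unimplemented") }
func (s *passThroughSender) AugmentMeta(roachpb.TxnCoordMeta) { panic("unimplemented") }
func (s *passThroughSender) OnFinish(func(error))             { panic("unimplemented") }

A client.DB is then built from the factory rather than from a single sender, e.g. db := client.NewDB(&passThroughFactory{wrapped: stores}, clock), matching the createTestStoreWithEngine and populateDB hunks above.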