diff --git a/ledger/participant-integration-api/BUILD.bazel b/ledger/participant-integration-api/BUILD.bazel
index e96c3c8357c3..9fcf069f0ecf 100644
--- a/ledger/participant-integration-api/BUILD.bazel
+++ b/ledger/participant-integration-api/BUILD.bazel
@@ -201,6 +201,7 @@ da_scala_library(
         "//libs-scala/resources",
         "//libs-scala/resources-akka",
         "//libs-scala/resources-grpc",
+        "//libs-scala/scala-utils",
         "//libs-scala/timer-utils",
         "@maven//:io_dropwizard_metrics_metrics_core",
         "@maven//:io_grpc_grpc_netty",
@@ -364,6 +365,8 @@ da_scala_test_suite(
        "//ledger/ledger-configuration",
        "//ledger/ledger-offset",
        "//ledger/ledger-resources",
+       "//ledger/metrics",
+       "//ledger/metrics:metrics-test-lib",
        "//ledger/participant-state",
        "//ledger/participant-state-index",
        "//libs-scala/contextualized-logging",
@@ -371,6 +374,7 @@ da_scala_test_suite(
        "//libs-scala/oracle-testing",
        "//libs-scala/ports",
        "//libs-scala/resources",
+       "//libs-scala/scala-utils",
        "@maven//:org_scalatest_scalatest_compatible",
        "@maven//:org_slf4j_slf4j_api",
    ],
diff --git a/ledger/participant-integration-api/src/main/scala/platform/store/backend/StorageBackend.scala b/ledger/participant-integration-api/src/main/scala/platform/store/backend/StorageBackend.scala
index bc1a73b8887e..0cb6d2fcc7a6 100644
--- a/ledger/participant-integration-api/src/main/scala/platform/store/backend/StorageBackend.scala
+++ b/ledger/participant-integration-api/src/main/scala/platform/store/backend/StorageBackend.scala
@@ -49,7 +49,20 @@ trait StorageBackend[DB_BATCH]
     with EventStorageBackend
     with DataSourceStorageBackend
     with DBLockStorageBackend {
+
+  /** Truncates all storage backend tables, EXCEPT the packages table.
+    * Does not touch other tables, like the Flyway history table.
+    * Rationale: reset() is used by the ledger API reset service, which is mainly
+    * used for application tests in another project; re-uploading packages after
+    * each test would significantly slow those tests down.
+    */
   def reset(connection: Connection): Unit
+
+  /** Truncates ALL storage backend tables.
+    * Does not touch other tables, like the Flyway history table.
+    * The result is a database that looks the same as a freshly created database with Flyway migrations applied.
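+    * Tests rely on this to restore a pristine database between test cases without
+    * re-running Flyway migrations (see StorageBackendSpec).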
+ */ + def resetAll(connection: Connection): Unit def duplicateKeyError: String // TODO: Avoid brittleness of error message checks } diff --git a/ledger/participant-integration-api/src/main/scala/platform/store/backend/h2/H2StorageBackend.scala b/ledger/participant-integration-api/src/main/scala/platform/store/backend/h2/H2StorageBackend.scala index fa5cddc8ec86..53944201f30d 100644 --- a/ledger/participant-integration-api/src/main/scala/platform/store/backend/h2/H2StorageBackend.scala +++ b/ledger/participant-integration-api/src/main/scala/platform/store/backend/h2/H2StorageBackend.scala @@ -57,6 +57,25 @@ private[backend] object H2StorageBackend () } + override def resetAll(connection: Connection): Unit = { + SQL("""set referential_integrity false; + |truncate table configuration_entries; + |truncate table packages; + |truncate table package_entries; + |truncate table parameters; + |truncate table participant_command_completions; + |truncate table participant_command_submissions; + |truncate table participant_events_divulgence; + |truncate table participant_events_create; + |truncate table participant_events_consuming_exercise; + |truncate table participant_events_non_consuming_exercise; + |truncate table parties; + |truncate table party_entries; + |set referential_integrity true;""".stripMargin) + .execute()(connection) + () + } + override def duplicateKeyError: String = "Unique index or primary key violation" val SQL_INSERT_COMMAND: String = diff --git a/ledger/participant-integration-api/src/main/scala/platform/store/backend/oracle/OracleStorageBackend.scala b/ledger/participant-integration-api/src/main/scala/platform/store/backend/oracle/OracleStorageBackend.scala index e031ebcb4356..de710135b453 100644 --- a/ledger/participant-integration-api/src/main/scala/platform/store/backend/oracle/OracleStorageBackend.scala +++ b/ledger/participant-integration-api/src/main/scala/platform/store/backend/oracle/OracleStorageBackend.scala @@ -54,6 +54,22 @@ private[backend] object OracleStorageBackend "truncate table party_entries cascade", ).map(SQL(_)).foreach(_.execute()(connection)) + override def resetAll(connection: Connection): Unit = + List( + "truncate table configuration_entries cascade", + "truncate table packages cascade", + "truncate table package_entries cascade", + "truncate table parameters cascade", + "truncate table participant_command_completions cascade", + "truncate table participant_command_submissions cascade", + "truncate table participant_events_divulgence cascade", + "truncate table participant_events_create cascade", + "truncate table participant_events_consuming_exercise cascade", + "truncate table participant_events_non_consuming_exercise cascade", + "truncate table parties cascade", + "truncate table party_entries cascade", + ).map(SQL(_)).foreach(_.execute()(connection)) + override def duplicateKeyError: String = "unique constraint" val SQL_INSERT_COMMAND: String = diff --git a/ledger/participant-integration-api/src/main/scala/platform/store/backend/postgresql/PostgresStorageBackend.scala b/ledger/participant-integration-api/src/main/scala/platform/store/backend/postgresql/PostgresStorageBackend.scala index 7ee9ebfccf5e..318f1072d70a 100644 --- a/ledger/participant-integration-api/src/main/scala/platform/store/backend/postgresql/PostgresStorageBackend.scala +++ b/ledger/participant-integration-api/src/main/scala/platform/store/backend/postgresql/PostgresStorageBackend.scala @@ -88,6 +88,24 @@ private[backend] object PostgresStorageBackend () } + override def 
resetAll(connection: Connection): Unit = { + SQL("""truncate table configuration_entries cascade; + |truncate table packages cascade; + |truncate table package_entries cascade; + |truncate table parameters cascade; + |truncate table participant_command_completions cascade; + |truncate table participant_command_submissions cascade; + |truncate table participant_events_divulgence cascade; + |truncate table participant_events_create cascade; + |truncate table participant_events_consuming_exercise cascade; + |truncate table participant_events_non_consuming_exercise cascade; + |truncate table parties cascade; + |truncate table party_entries cascade; + |""".stripMargin) + .execute()(connection) + () + } + override val duplicateKeyError: String = "duplicate key" object PostgresQueryStrategy extends QueryStrategy { diff --git a/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendPostgresSpec.scala b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendPostgresSpec.scala deleted file mode 100644 index 7fe0919e0346..000000000000 --- a/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendPostgresSpec.scala +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.daml.platform.store.backend - -import java.sql.Connection - -import com.codahale.metrics.MetricRegistry -import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll -import com.daml.ledger.resources.{Resource, ResourceContext} -import com.daml.logging.{ContextualizedLogger, LoggingContext} -import com.daml.metrics.Metrics -import com.daml.platform.configuration.ServerRole -import com.daml.platform.store.appendonlydao.DbDispatcher -import com.daml.platform.store.{DbType, FlywayMigrations} -import com.daml.testing.postgresql.PostgresAroundEach -import org.scalatest.AsyncTestSuite - -import scala.concurrent.{Await, Future} -import scala.concurrent.duration.{DurationInt, FiniteDuration} - -private[backend] trait StorageBackendPostgresSpec - extends AkkaBeforeAndAfterAll - with PostgresAroundEach { this: AsyncTestSuite => - - protected val logger: ContextualizedLogger = ContextualizedLogger.get(getClass) - implicit val loggingContext: LoggingContext = LoggingContext.ForTesting - - private val dbType: DbType = DbType.Postgres - private def jdbcUrl: String = postgresDatabase.url - - private val connectionPoolSize: Int = 16 - private val metrics = new Metrics(new MetricRegistry) - - // The storage backend is stateless - protected val storageBackend: StorageBackend[_] = StorageBackend.of(dbType) - - // Each test gets its own database and its own connection pool - private var resource: Resource[DbDispatcher] = _ - protected var dbDispatcher: DbDispatcher = _ - - protected def executeSql[T](sql: Connection => T): Future[T] = { - dbDispatcher.executeSql(metrics.test.db)(sql) - } - - override protected def beforeEach(): Unit = { - super.beforeEach() - - // TODO: use a custom execution context, like JdbcLedgeDao.beforeAll() - implicit val resourceContext: ResourceContext = ResourceContext(system.dispatcher) - - resource = for { - _ <- Resource.fromFuture( - new FlywayMigrations(jdbcUrl).migrate(enableAppendOnlySchema = true) - ) - dispatcher <- DbDispatcher - .owner( - dataSource = storageBackend.createDataSource(jdbcUrl), - serverRole = ServerRole.Testing(this.getClass), - connectionPoolSize = connectionPoolSize, 
- connectionTimeout = FiniteDuration(250, "millis"), - metrics = metrics, - ) - .acquire() - } yield dispatcher - dbDispatcher = Await.result(resource.asFuture, 30.seconds) - } - - override protected def afterEach(): Unit = { - Await.result(resource.release(), 30.seconds) - super.afterEach() - } -} diff --git a/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendProvider.scala b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendProvider.scala new file mode 100644 index 000000000000..d02d2f95294c --- /dev/null +++ b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendProvider.scala @@ -0,0 +1,46 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.daml.platform.store.backend + +import java.sql.Connection + +import com.daml.platform.store.backend.h2.H2StorageBackend +import com.daml.platform.store.backend.oracle.OracleStorageBackend +import com.daml.platform.store.backend.postgresql.PostgresStorageBackend +import com.daml.testing.oracle.OracleAroundAll +import com.daml.testing.postgresql.PostgresAroundAll +import org.scalatest.Suite + +/** Creates a database and a [[StorageBackend]]. + * Used by [[StorageBackendSpec]] to run all StorageBackend tests on different databases. + */ +private[backend] trait StorageBackendProvider { + protected def jdbcUrl: String + protected def backend: StorageBackend[_] + protected final def ingest(dbDtos: Vector[DbDto], connection: Connection): Unit = { + def typeBoundIngest[T](backend: StorageBackend[T]): Unit = + backend.insertBatch(connection, backend.batch(dbDtos)) + typeBoundIngest(backend) + } +} + +private[backend] trait StorageBackendProviderPostgres + extends StorageBackendProvider + with PostgresAroundAll { this: Suite => + override protected def jdbcUrl: String = postgresDatabase.url + override protected val backend: StorageBackend[_] = PostgresStorageBackend +} + +private[backend] trait StorageBackendProviderH2 extends StorageBackendProvider { this: Suite => + override protected def jdbcUrl: String = "jdbc:h2:mem:storage_backend_provider;db_close_delay=-1" + override protected val backend: StorageBackend[_] = H2StorageBackend +} + +private[backend] trait StorageBackendProviderOracle + extends StorageBackendProvider + with OracleAroundAll { this: Suite => + override protected def jdbcUrl: String = + s"jdbc:oracle:thin:$oracleUser/$oraclePwd@localhost:$oraclePort/ORCLPDB1" + override protected val backend: StorageBackend[_] = OracleStorageBackend +} diff --git a/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendSpec.scala b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendSpec.scala new file mode 100644 index 000000000000..1a7dfccffce9 --- /dev/null +++ b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendSpec.scala @@ -0,0 +1,95 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.daml.platform.store.backend
+
+import java.sql.Connection
+import java.util.concurrent.atomic.AtomicInteger
+
+import com.codahale.metrics.MetricRegistry
+import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
+import com.daml.ledger.resources.{Resource, ResourceContext}
+import com.daml.logging.{ContextualizedLogger, LoggingContext}
+import com.daml.metrics.Metrics
+import com.daml.platform.configuration.ServerRole
+import com.daml.platform.store.appendonlydao.DbDispatcher
+import com.daml.platform.store.FlywayMigrations
+import org.scalatest.{AsyncTestSuite, BeforeAndAfterEach}
+
+import scala.concurrent.{Await, Future}
+import scala.concurrent.duration.{DurationInt, FiniteDuration}
+
+private[backend] trait StorageBackendSpec
+    extends AkkaBeforeAndAfterAll
+    with BeforeAndAfterEach
+    with StorageBackendProvider { this: AsyncTestSuite =>
+
+  protected val logger: ContextualizedLogger = ContextualizedLogger.get(getClass)
+  implicit protected val loggingContext: LoggingContext = LoggingContext.ForTesting
+
+  private val connectionPoolSize: Int = 16
+  private val metrics = new Metrics(new MetricRegistry)
+
+  // Initialized in beforeAll()
+  private var dbDispatcherResource: Resource[DbDispatcher] = _
+  private var dbDispatcher: DbDispatcher = _
+
+  protected def executeSql[T](sql: Connection => T): Future[T] = {
+    dbDispatcher.executeSql(metrics.test.db)(sql)
+  }
+
+  override protected def beforeAll(): Unit = {
+    super.beforeAll()
+
+    implicit val resourceContext: ResourceContext = ResourceContext(system.dispatcher)
+    implicit val loggingContext: LoggingContext = LoggingContext.ForTesting
+    dbDispatcherResource = for {
+      _ <- Resource.fromFuture(
+        new FlywayMigrations(jdbcUrl).migrate(enableAppendOnlySchema = true)
+      )
+      dispatcher <- DbDispatcher
+        .owner(
+          dataSource = backend.createDataSource(jdbcUrl),
+          serverRole = ServerRole.Testing(this.getClass),
+          connectionPoolSize = connectionPoolSize,
+          connectionTimeout = FiniteDuration(250, "millis"),
+          metrics = metrics,
+        )
+        .acquire()
+    } yield dispatcher
+
+    dbDispatcher = Await.result(dbDispatcherResource.asFuture, 30.seconds)
+    logger.info(
+      s"Finished setting up database $jdbcUrl for tests. You can now connect to this database to debug failed tests. Note that tables are truncated between each test."
+    )
+  }
+
+  override protected def afterAll(): Unit = {
+    Await.result(dbDispatcherResource.release(), 30.seconds)
+    super.afterAll()
+  }
+
+  private val runningTests = new AtomicInteger(0)
+
+  // Each test should start with an empty database to allow testing low-level behavior.
+  // However, creating a fresh database for each test would be too expensive.
+  // Instead, we truncate all tables using the resetAll() call before each test.
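+  // Note that resetAll() leaves the Flyway history table untouched (see its scaladoc),
+  // so the migrations applied once in beforeAll() remain valid for every test.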
+ override protected def beforeEach(): Unit = { + super.beforeEach() + + assert( + runningTests.incrementAndGet() == 1, + "StorageBackendSpec tests must not run in parallel, as they all run against the same database.", + ) + Await.result(executeSql(backend.resetAll), 10.seconds) + } + + override protected def afterEach(): Unit = { + assert( + runningTests.decrementAndGet() == 0, + "StorageBackendSpec tests must not run in parallel, as they all run against the same database.", + ) + + super.afterEach() + } +} diff --git a/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendSuite.scala b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendSuite.scala new file mode 100644 index 000000000000..2b90f7def21f --- /dev/null +++ b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendSuite.scala @@ -0,0 +1,16 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.daml.platform.store.backend + +import org.scalatest.flatspec.AsyncFlatSpec + +trait StorageBackendSuite + extends StorageBackendSpec + with StorageBackendTestsInitialization + with StorageBackendTestsInitializeIngestion + with StorageBackendTestsIngestion + with StorageBackendTestsReset + with StorageBackendTestsPruning { + this: AsyncFlatSpec => +} diff --git a/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestValues.scala b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestValues.scala new file mode 100644 index 000000000000..e3f100e2adc3 --- /dev/null +++ b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestValues.scala @@ -0,0 +1,254 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.daml.platform.store.backend
+
+import java.time.{Duration, Instant}
+import java.util.UUID
+
+import com.daml.daml_lf_dev.DamlLf
+import com.daml.ledger.api.domain.{LedgerId, ParticipantId}
+import com.daml.ledger.configuration.{Configuration, LedgerTimeModel}
+import com.daml.ledger.offset.Offset
+import com.daml.lf.data.Ref
+import com.daml.lf.ledger.EventId
+import com.daml.lf.transaction.NodeId
+import com.daml.platform.store.appendonlydao.JdbcLedgerDao
+import com.google.protobuf.ByteString
+
+/** Except where specified, values should be treated as opaque.
+  */
+private[backend] object StorageBackendTestValues {
+
+  /** Produces offsets that are ordered the same as the input value */
+  def offset(x: Long): Offset = Offset.fromHexString(Ref.HexString.assertFromString(f"$x%08d"))
+  def ledgerEnd(o: Long, e: Long): ParameterStorageBackend.LedgerEnd =
+    ParameterStorageBackend.LedgerEnd(offset(o), e)
+  def transactionIdFromOffset(x: Offset): Ref.LedgerString =
+    Ref.LedgerString.assertFromString(x.toHexString)
+
+  val someTime: Instant = Instant.now()
+
+  val someConfiguration: Configuration =
+    Configuration(1, LedgerTimeModel.reasonableDefault, Duration.ofHours(23))
+
+  val someLedgerId: LedgerId = LedgerId("ledger")
+  val someParticipantId: ParticipantId = ParticipantId(
+    Ref.ParticipantId.assertFromString("participant")
+  )
+  val someTemplateId: Ref.Identifier = Ref.Identifier.assertFromString("pkg:Mod:Template")
+  val someIdentityParams: ParameterStorageBackend.IdentityParams =
+    ParameterStorageBackend.IdentityParams(someLedgerId, someParticipantId)
+  val someParty: Ref.Party = Ref.Party.assertFromString("party")
+
+  val someArchive: DamlLf.Archive = DamlLf.Archive.newBuilder
+    .setHash("00001")
+    .setHashFunction(DamlLf.HashFunction.SHA256)
+    .setPayload(ByteString.copyFromUtf8("payload 1"))
+    .build
+
+  // This is not a valid serialization of a Daml-Lf value. This is OK:
+  // the tests never deserialize Daml-Lf values, they just need some non-empty array,
+  // because Oracle converts empty arrays to NULL, which then breaks non-null constraints.
+ val someSerializedDamlLfValue: Array[Byte] = Array.fill[Byte](8)(15) + + def dtoConfiguration( + offset: Offset, + configuration: Configuration = someConfiguration, + ): DbDto.ConfigurationEntry = + DbDto.ConfigurationEntry( + ledger_offset = offset.toHexString, + recorded_at = someTime, + submission_id = "submission_id", + typ = JdbcLedgerDao.acceptType, + configuration = Configuration.encode(configuration).toByteArray, + rejection_reason = None, + ) + + def dtoPartyEntry( + offset: Offset, + party: String = someParty.toString, + ): DbDto.PartyEntry = DbDto.PartyEntry( + ledger_offset = offset.toHexString, + recorded_at = someTime, + submission_id = Some("submission_id"), + party = Some(party), + display_name = Some(party), + typ = JdbcLedgerDao.acceptType, + rejection_reason = None, + is_local = Some(true), + ) + + def dtoParty( + offset: Offset, + party: String = someParty.toString, + ): DbDto.Party = DbDto.Party( + party = party, + display_name = Some(party), + explicit = true, + ledger_offset = Some(offset.toHexString), + is_local = true, + ) + + def dtoPackage(offset: Offset): DbDto.Package = DbDto.Package( + package_id = someArchive.getHash, + upload_id = "upload_id", + source_description = Some("source_description"), + package_size = someArchive.getPayload.size.toLong, + known_since = someTime, + ledger_offset = offset.toHexString, + _package = someArchive.toByteArray, + ) + + def dtoPackageEntry(offset: Offset): DbDto.PackageEntry = DbDto.PackageEntry( + ledger_offset = offset.toHexString, + recorded_at = someTime, + submission_id = Some("submission_id"), + typ = JdbcLedgerDao.acceptType, + rejection_reason = None, + ) + + /** A simple create event. + * Corresponds to a transaction with a single create node. + */ + def dtoCreate( + offset: Offset, + eventSequentialId: Long, + contractId: String, + signatory: String = "signatory", + observer: String = "observer", + commandId: String = UUID.randomUUID().toString, + ): DbDto.EventCreate = { + val transactionId = transactionIdFromOffset(offset) + DbDto.EventCreate( + event_offset = Some(offset.toHexString), + transaction_id = Some(transactionId), + ledger_effective_time = Some(someTime), + command_id = Some(commandId), + workflow_id = Some("workflow_id"), + application_id = Some("application_id"), + submitters = None, + node_index = Some(0), + event_id = Some(EventId(transactionId, NodeId(0)).toLedgerString), + contract_id = contractId, + template_id = Some(someTemplateId.toString), + flat_event_witnesses = Set(signatory, observer), + tree_event_witnesses = Set(signatory, observer), + create_argument = Some(someSerializedDamlLfValue), + create_signatories = Some(Set(signatory)), + create_observers = Some(Set(observer)), + create_agreement_text = None, + create_key_value = None, + create_key_hash = None, + create_argument_compression = None, + create_key_value_compression = None, + event_sequential_id = eventSequentialId, + ) + } + + /** A simple exercise event. + * Corresponds to a transaction with a single exercise node. 
+ * + * @param signatory The signatory of the contract (see corresponding create node) + * @param actor The choice actor, who is also the submitter + */ + def dtoExercise( + offset: Offset, + eventSequentialId: Long, + consuming: Boolean, + contractId: String, + signatory: String = "signatory", + actor: String = "actor", + commandId: String = UUID.randomUUID().toString, + ): DbDto.EventExercise = { + val transactionId = transactionIdFromOffset(offset) + DbDto.EventExercise( + consuming = consuming, + event_offset = Some(offset.toHexString), + transaction_id = Some(transactionId), + ledger_effective_time = Some(someTime), + command_id = Some(commandId), + workflow_id = Some("workflow_id"), + application_id = Some("application_id"), + submitters = Some(Set(actor)), + node_index = Some(0), + event_id = Some(EventId(transactionId, NodeId(0)).toLedgerString), + contract_id = contractId, + template_id = Some(someTemplateId.toString), + flat_event_witnesses = if (consuming) Set(signatory) else Set.empty, + tree_event_witnesses = Set(signatory, actor), + create_key_value = None, + exercise_choice = Some("exercise_choice"), + exercise_argument = Some(someSerializedDamlLfValue), + exercise_result = Some(someSerializedDamlLfValue), + exercise_actors = Some(Set(actor)), + exercise_child_event_ids = Some(Set.empty), + create_key_value_compression = None, + exercise_argument_compression = None, + exercise_result_compression = None, + event_sequential_id = eventSequentialId, + ) + } + + /** A single divulgence event + */ + def dtoDivulgence( + offset: Offset, + eventSequentialId: Long, + contractId: String, + submitter: String = "signatory", + divulgee: String = "divulgee", + commandId: String = UUID.randomUUID().toString, + ): DbDto.EventDivulgence = { + DbDto.EventDivulgence( + event_offset = Some(offset.toHexString), + command_id = Some(commandId), + workflow_id = Some("workflow_id"), + application_id = Some("application_id"), + submitters = Some(Set(submitter)), + contract_id = contractId, + template_id = Some(someTemplateId.toString), + tree_event_witnesses = Set(divulgee), + create_argument = Some(someSerializedDamlLfValue), + create_argument_compression = None, + event_sequential_id = eventSequentialId, + ) + } + + def dtoCompletion( + offset: Offset, + submitter: String = "signatory", + commandId: String = UUID.randomUUID().toString, + ): DbDto.CommandCompletion = { + val transactionId = transactionIdFromOffset(offset) + DbDto.CommandCompletion( + completion_offset = offset.toHexString, + record_time = someTime, + application_id = "application_id", + submitters = Set(submitter), + command_id = commandId, + transaction_id = Some(transactionId), + rejection_status_code = None, + rejection_status_message = None, + rejection_status_details = None, + ) + } + + def dtoTransactionId(dto: DbDto): Ref.TransactionId = { + dto match { + case e: DbDto.EventCreate => Ref.TransactionId.assertFromString(e.transaction_id.get) + case e: DbDto.EventExercise => Ref.TransactionId.assertFromString(e.transaction_id.get) + case _ => sys.error(s"$dto does not have a transaction id") + } + } + + def dtoApplicationId(dto: DbDto): Ref.ApplicationId = { + dto match { + case e: DbDto.EventCreate => Ref.ApplicationId.assertFromString(e.application_id.get) + case e: DbDto.EventExercise => Ref.ApplicationId.assertFromString(e.application_id.get) + case e: DbDto.EventDivulgence => Ref.ApplicationId.assertFromString(e.application_id.get) + case e: DbDto.CommandCompletion => Ref.ApplicationId.assertFromString(e.application_id) + 
case _ => sys.error(s"$dto does not have an application id") + } + } +} diff --git a/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsIngestion.scala b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsIngestion.scala new file mode 100644 index 000000000000..34f1e2e74839 --- /dev/null +++ b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsIngestion.scala @@ -0,0 +1,92 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.daml.platform.store.backend + +import org.scalatest.Inside +import org.scalatest.flatspec.AsyncFlatSpec +import org.scalatest.matchers.should.Matchers + +private[backend] trait StorageBackendTestsIngestion + extends Matchers + with Inside + with StorageBackendSpec { + this: AsyncFlatSpec => + + behavior of "StorageBackend (ingestion)" + + import StorageBackendTestValues._ + + it should "ingest a single configuration update" in { + val someOffset = offset(1) + val dtos = Vector( + dtoConfiguration(someOffset, someConfiguration) + ) + + for { + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + _ <- executeSql(ingest(dtos, _)) + configBeforeLedgerEndUpdate <- executeSql(backend.ledgerConfiguration) + _ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(someOffset, 0))) + configAfterLedgerEndUpdate <- executeSql(backend.ledgerConfiguration) + } yield { + // The first query is executed before the ledger end is updated. + // It should not see the already ingested configuration change. + configBeforeLedgerEndUpdate shouldBe empty + + // The second query should now see the configuration change. + inside(configAfterLedgerEndUpdate) { case Some((offset, config)) => + offset shouldBe someOffset + config shouldBe someConfiguration + } + configAfterLedgerEndUpdate should not be empty + } + } + + it should "ingest a single package update" in { + val someOffset = offset(1) + val dtos = Vector( + dtoPackage(someOffset), + dtoPackageEntry(someOffset), + ) + + for { + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + _ <- executeSql(ingest(dtos, _)) + packagesBeforeLedgerEndUpdate <- executeSql(backend.lfPackages) + _ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(someOffset, 0))) + packagesAfterLedgerEndUpdate <- executeSql(backend.lfPackages) + } yield { + // The first query is executed before the ledger end is updated. + // It should not see the already ingested package upload. + packagesBeforeLedgerEndUpdate shouldBe empty + + // The second query should now see the package. + packagesAfterLedgerEndUpdate should not be empty + } + } + + it should "ingest a single party update" in { + val someOffset = offset(1) + val dtos = Vector( + dtoParty(someOffset), + dtoPartyEntry(someOffset), + ) + + for { + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + _ <- executeSql(ingest(dtos, _)) + partiesBeforeLedgerEndUpdate <- executeSql(backend.knownParties) + _ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(someOffset, 0))) + partiesAfterLedgerEndUpdate <- executeSql(backend.knownParties) + } yield { + // The first query is executed before the ledger end is updated. + // It should not see the already ingested party allocation. + partiesBeforeLedgerEndUpdate shouldBe empty + + // The second query should now see the party. 
+ partiesAfterLedgerEndUpdate should not be empty + } + } + +} diff --git a/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendPostgresInitializationSpec.scala b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsInitialization.scala similarity index 84% rename from ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendPostgresInitializationSpec.scala rename to ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsInitialization.scala index ba35d9fe56da..6e6bb5e2573d 100644 --- a/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendPostgresInitializationSpec.scala +++ b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsInitialization.scala @@ -9,10 +9,8 @@ import com.daml.platform.common.MismatchException import org.scalatest.flatspec.AsyncFlatSpec import org.scalatest.matchers.should.Matchers -final class StorageBackendPostgresInitializationSpec - extends AsyncFlatSpec - with StorageBackendPostgresSpec - with Matchers { +private[backend] trait StorageBackendTestsInitialization extends Matchers with StorageBackendSpec { + this: AsyncFlatSpec => behavior of "StorageBackend (initialization)" @@ -24,7 +22,7 @@ final class StorageBackendPostgresInitializationSpec for { _ <- executeSql( - storageBackend.initializeParameters( + backend.initializeParameters( ParameterStorageBackend.IdentityParams( ledgerId = ledgerId, participantId = participantId, @@ -32,7 +30,7 @@ final class StorageBackendPostgresInitializationSpec ) ) error1 <- executeSql( - storageBackend.initializeParameters( + backend.initializeParameters( ParameterStorageBackend.IdentityParams( ledgerId = otherLedgerId, participantId = participantId, @@ -40,7 +38,7 @@ final class StorageBackendPostgresInitializationSpec ) ).failed error2 <- executeSql( - storageBackend.initializeParameters( + backend.initializeParameters( ParameterStorageBackend.IdentityParams( ledgerId = ledgerId, participantId = otherParticipantId, @@ -48,7 +46,7 @@ final class StorageBackendPostgresInitializationSpec ) ).failed error3 <- executeSql( - storageBackend.initializeParameters( + backend.initializeParameters( ParameterStorageBackend.IdentityParams( ledgerId = otherLedgerId, participantId = otherParticipantId, @@ -56,7 +54,7 @@ final class StorageBackendPostgresInitializationSpec ) ).failed _ <- executeSql( - storageBackend.initializeParameters( + backend.initializeParameters( ParameterStorageBackend.IdentityParams( ledgerId = ledgerId, participantId = participantId, diff --git a/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsInitializeIngestion.scala b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsInitializeIngestion.scala new file mode 100644 index 000000000000..f1cea6796929 --- /dev/null +++ b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsInitializeIngestion.scala @@ -0,0 +1,130 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.daml.platform.store.backend + +import com.daml.lf.data.Ref +import com.daml.lf.value.Value.ContractId +import org.scalatest.Inside +import org.scalatest.flatspec.AsyncFlatSpec +import org.scalatest.matchers.should.Matchers + +private[backend] trait StorageBackendTestsInitializeIngestion + extends Matchers + with Inside + with StorageBackendSpec { + this: AsyncFlatSpec => + + behavior of "StorageBackend (initializeIngestion)" + + import StorageBackendTestValues._ + + it should "report the correct ledger end" in { + val someLedgerEnd = ParameterStorageBackend.LedgerEnd(offset(1), 1L) + for { + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + endBefore <- executeSql(backend.initializeIngestion) + _ <- executeSql(backend.updateLedgerEnd(someLedgerEnd)) + endAfter <- executeSql(backend.initializeIngestion) + } yield { + endBefore shouldBe empty + endAfter shouldBe Some(someLedgerEnd) + } + } + + it should "delete overspill entries" in { + val dtos1: Vector[DbDto] = Vector( + // 1: config change + dtoConfiguration(offset(1), someConfiguration), + // 2: party allocation + dtoParty(offset(2), "party1"), + dtoPartyEntry(offset(2), "party1"), + // 3: package upload + dtoPackage(offset(3)), + dtoPackageEntry(offset(3)), + // 4: transaction with create node + dtoCreate(offset(4), 1L, "#4"), + dtoCompletion(offset(4)), + // 5: transaction with exercise node and retroactive divulgence + dtoExercise(offset(5), 2L, false, "#4"), + dtoDivulgence(offset(5), 3L, "#4"), + dtoCompletion(offset(5)), + ) + + val dtos2: Vector[DbDto] = Vector( + // 6: config change + dtoConfiguration(offset(6), someConfiguration), + // 7: party allocation + dtoParty(offset(7), "party2"), + dtoPartyEntry(offset(7), "party2"), + // 8: package upload + dtoPackage(offset(8)), + dtoPackageEntry(offset(8)), + // 9: transaction with create node + dtoCreate(offset(9), 4L, "#9"), + dtoCompletion(offset(9)), + // 10: transaction with exercise node and retroactive divulgence + dtoExercise(offset(10), 5L, false, "#9"), + dtoDivulgence(offset(10), 6L, "#9"), + dtoCompletion(offset(10)), + ) + + // TODO: make sure it's obvious these are the stakeholders of dtoCreate() nodes created above + val readers = Set(Ref.Party.assertFromString("signatory")) + + for { + // Initialize + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + + // Start the indexer (a no-op in this case) + _ <- executeSql(backend.initializeIngestion) + + // Fully insert first batch of updates + _ <- executeSql(ingest(dtos1, _)) + _ <- executeSql(backend.updateLedgerEnd(ledgerEnd(5, 3L))) + + // Partially insert second batch of updates (indexer crashes before updating ledger end) + _ <- executeSql(ingest(dtos2, _)) + + // Check the contents + parties1 <- executeSql(backend.knownParties) + config1 <- executeSql(backend.ledgerConfiguration) + packages1 <- executeSql(backend.lfPackages) + contract41 <- executeSql( + backend.activeContractWithoutArgument(readers, ContractId.V0.assertFromString("#4")) + ) + contract91 <- executeSql( + backend.activeContractWithoutArgument(readers, ContractId.V0.assertFromString("#9")) + ) + + // Restart the indexer - should delete data from the partial insert above + _ <- executeSql(backend.initializeIngestion) + + // Move the ledger end so that any non-deleted data would become visible + _ <- executeSql(backend.updateLedgerEnd(ledgerEnd(10, 6L))) + + // Check the contents + parties2 <- executeSql(backend.knownParties) + config2 <- 
executeSql(backend.ledgerConfiguration) + packages2 <- executeSql(backend.lfPackages) + contract42 <- executeSql( + backend.activeContractWithoutArgument(readers, ContractId.V0.assertFromString("#4")) + ) + contract92 <- executeSql( + backend.activeContractWithoutArgument(readers, ContractId.V0.assertFromString("#9")) + ) + } yield { + parties1 should have length 1 + packages1 should have size 1 + config1 shouldBe Some(offset(1) -> someConfiguration) + contract41 should not be empty + contract91 shouldBe None + + parties2 should have length 1 + packages2 should have size 1 + config2 shouldBe Some(offset(1) -> someConfiguration) + contract42 should not be empty + contract92 shouldBe None + } + } +} diff --git a/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsPruning.scala b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsPruning.scala new file mode 100644 index 000000000000..fc8a95ff0f1b --- /dev/null +++ b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsPruning.scala @@ -0,0 +1,167 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.daml.platform.store.backend + +import com.daml.lf.data.Ref +import com.daml.platform.store.backend.EventStorageBackend.{FilterParams, RangeParams} +import org.scalatest.flatspec.AsyncFlatSpec +import org.scalatest.matchers.should.Matchers + +private[backend] trait StorageBackendTestsPruning extends Matchers with StorageBackendSpec { + this: AsyncFlatSpec => + + behavior of "StorageBackend (pruning)" + + import StorageBackendTestValues._ + + it should "correctly update the pruning offset" in { + val someOffset = offset(3) + for { + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + initialPruningOffset <- executeSql(backend.prunedUptoInclusive) + _ <- executeSql(backend.updatePrunedUptoInclusive(someOffset)) + updatedPruningOffset <- executeSql(backend.prunedUptoInclusive) + } yield { + initialPruningOffset shouldBe empty + updatedPruningOffset shouldBe Some(someOffset) + } + } + + it should "prune an archived contract" in { + val someParty = Ref.Party.assertFromString("party") + val create = dtoCreate( + offset = offset(1), + eventSequentialId = 1L, + contractId = "#1", + signatory = someParty, + ) + val createTransactionId = dtoTransactionId(create) + val archive = dtoExercise( + offset = offset(2), + eventSequentialId = 2L, + consuming = true, + contractId = "#1", + signatory = someParty, + ) + val range = RangeParams(0L, 2L, None, None) + val filter = FilterParams(Set(someParty), Set.empty) + for { + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + // Ingest a create and archive event + _ <- executeSql(ingest(Vector(create, archive), _)) + _ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(2), 2L))) + // Make sure the events are visible + before1 <- executeSql(backend.transactionEvents(range, filter)) + before2 <- executeSql(backend.activeContractEvents(range, filter, offset(1))) + before3 <- executeSql(backend.flatTransaction(createTransactionId, filter)) + before4 <- executeSql(backend.transactionTreeEvents(range, filter)) + before5 <- executeSql(backend.transactionTree(createTransactionId, filter)) + before6 <- executeSql(backend.rawEvents(0, 2L)) + // Prune + _ <- executeSql(backend.pruneEvents(offset(2))) + _ <- 
executeSql(backend.updatePrunedUptoInclusive(offset(2)))
+      // Make sure the events are not visible anymore
+      after1 <- executeSql(backend.transactionEvents(range, filter))
+      after2 <- executeSql(backend.activeContractEvents(range, filter, offset(1)))
+      after3 <- executeSql(backend.flatTransaction(createTransactionId, filter))
+      after4 <- executeSql(backend.transactionTreeEvents(range, filter))
+      after5 <- executeSql(backend.transactionTree(createTransactionId, filter))
+      after6 <- executeSql(backend.rawEvents(0, 2L))
+    } yield {
+      before1 should not be empty
+      before2 should not be empty
+      before3 should not be empty
+      before4 should not be empty
+      before5 should not be empty
+      before6 should not be empty
+
+      after1 shouldBe empty
+      after2 shouldBe empty
+      after3 shouldBe empty
+      after4 shouldBe empty
+      after5 shouldBe empty
+      after6 shouldBe empty
+    }
+  }
+
+  it should "not prune an active contract" in {
+    val someParty = Ref.Party.assertFromString("party")
+    val create = dtoCreate(
+      offset = offset(1),
+      eventSequentialId = 1L,
+      contractId = "#1",
+      signatory = someParty,
+    )
+    val createTransactionId = dtoTransactionId(create)
+    val range = RangeParams(0L, 1L, None, None)
+    val filter = FilterParams(Set(someParty), Set.empty)
+    for {
+      _ <- executeSql(backend.initializeParameters(someIdentityParams))
+      // Ingest a single create event (the contract stays active)
+      _ <- executeSql(ingest(Vector(create), _))
+      _ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(1), 1L)))
+      // Make sure the events are visible
+      before1 <- executeSql(backend.transactionEvents(range, filter))
+      before2 <- executeSql(backend.activeContractEvents(range, filter, offset(1)))
+      before3 <- executeSql(backend.flatTransaction(createTransactionId, filter))
+      before4 <- executeSql(backend.transactionTreeEvents(range, filter))
+      before5 <- executeSql(backend.transactionTree(createTransactionId, filter))
+      before6 <- executeSql(backend.rawEvents(0, 1L))
+      // Prune
+      _ <- executeSql(backend.pruneEvents(offset(1)))
+      _ <- executeSql(backend.updatePrunedUptoInclusive(offset(1)))
+      // Make sure the events are still visible - active contracts should not be pruned
+      after1 <- executeSql(backend.transactionEvents(range, filter))
+      after2 <- executeSql(backend.activeContractEvents(range, filter, offset(1)))
+      after3 <- executeSql(backend.flatTransaction(createTransactionId, filter))
+      after4 <- executeSql(backend.transactionTreeEvents(range, filter))
+      after5 <- executeSql(backend.transactionTree(createTransactionId, filter))
+      after6 <- executeSql(backend.rawEvents(0, 1L))
+    } yield {
+      before1 should not be empty
+      before2 should not be empty
+      before3 should not be empty
+      before4 should not be empty
+      before5 should not be empty
+      before6 should not be empty
+
+      // TODO is it intended that the transaction lookups don't see the active contracts?
+ after1 should not be empty + after2 should not be empty + after3 shouldBe empty // should not be empty + after4 should not be empty + after5 shouldBe empty // should not be empty + after6 should not be empty + } + } + + it should "prune completions" in { + val someParty = Ref.Party.assertFromString("party") + val completion = dtoCompletion( + offset = offset(1), + submitter = someParty, + ) + val applicationId = dtoApplicationId(completion) + for { + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + // Ingest a completion + _ <- executeSql(ingest(Vector(completion), _)) + _ <- executeSql(backend.updateLedgerEnd(ParameterStorageBackend.LedgerEnd(offset(1), 1L))) + // Make sure the completion is visible + before <- executeSql( + backend.commandCompletions(offset(0), offset(1), applicationId, Set(someParty)) + ) + // Prune + _ <- executeSql(backend.pruneCompletions(offset(1))) + _ <- executeSql(backend.updatePrunedUptoInclusive(offset(1))) + // Make sure the completion is not visible anymore + after <- executeSql( + backend.commandCompletions(offset(0), offset(1), applicationId, Set(someParty)) + ) + } yield { + before should not be empty + after shouldBe empty + } + } +} diff --git a/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsReset.scala b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsReset.scala new file mode 100644 index 000000000000..74387fc4c113 --- /dev/null +++ b/ledger/participant-integration-api/src/test/lib/scala/platform/store/backend/StorageBackendTestsReset.scala @@ -0,0 +1,154 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.daml.platform.store.backend + +import org.scalatest.flatspec.AsyncFlatSpec +import org.scalatest.matchers.should.Matchers + +import scala.concurrent.Future + +private[backend] trait StorageBackendTestsReset extends Matchers with StorageBackendSpec { + this: AsyncFlatSpec => + + behavior of "StorageBackend (reset)" + + import StorageBackendTestValues._ + + it should "start with an empty index" in { + for { + identity <- executeSql(backend.ledgerIdentity) + end <- executeSql(backend.ledgerEnd) + parties <- executeSql(backend.knownParties) + config <- executeSql(backend.ledgerConfiguration) + packages <- executeSql(backend.lfPackages) + events <- executeSql(backend.contractStateEvents(0, Long.MaxValue)) + } yield { + identity shouldBe None + end shouldBe None + parties shouldBe empty + packages shouldBe empty + events shouldBe empty + config shouldBe None + } + } + + it should "not see any data after advancing the ledger end" in { + for { + _ <- advanceLedgerEndToMakeOldDataVisible() + parties <- executeSql(backend.knownParties) + config <- executeSql(backend.ledgerConfiguration) + packages <- executeSql(backend.lfPackages) + } yield { + parties shouldBe empty + packages shouldBe empty + config shouldBe None + } + } + + it should "reset everything except packages when using reset" in { + val dtos: Vector[DbDto] = Vector( + // 1: config change + dtoConfiguration(offset(1)), + // 2: party allocation + dtoParty(offset(2)), + dtoPartyEntry(offset(2)), + // 3: package upload + dtoPackage(offset(3)), + dtoPackageEntry(offset(3)), + // 4: transaction with create node + dtoCreate(offset(4), 1L, "#4"), + dtoCompletion(offset(4)), + // 5: transaction with exercise node and retroactive divulgence + dtoExercise(offset(5), 2L, true, "#4"), + 
dtoDivulgence(offset(5), 3L, "#4"), + dtoCompletion(offset(5)), + ) + + for { + // Initialize and insert some data + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + _ <- executeSql(ingest(dtos, _)) + _ <- executeSql(backend.updateLedgerEnd(ledgerEnd(5, 3L))) + + // Reset + _ <- executeSql(backend.reset) + + // Check the contents + identity <- executeSql(backend.ledgerIdentity) + end <- executeSql(backend.ledgerEnd) + events <- executeSql(backend.contractStateEvents(0, Long.MaxValue)) + + // Check the contents (queries that don't read beyond ledger end) + _ <- advanceLedgerEndToMakeOldDataVisible() + parties <- executeSql(backend.knownParties) + config <- executeSql(backend.ledgerConfiguration) + packages <- executeSql(backend.lfPackages) + } yield { + identity shouldBe None + end shouldBe None + parties shouldBe empty + packages should not be empty // Note: reset() does not delete packages + events shouldBe empty + config shouldBe None + } + } + + it should "reset everything when using resetAll" in { + val dtos: Vector[DbDto] = Vector( + // 1: config change + dtoConfiguration(offset(1)), + // 2: party allocation + dtoParty(offset(2)), + dtoPartyEntry(offset(2)), + // 3: package upload + dtoPackage(offset(3)), + dtoPackageEntry(offset(3)), + // 4: transaction with create node + dtoCreate(offset(4), 1L, "#4"), + dtoCompletion(offset(4)), + // 5: transaction with exercise node and retroactive divulgence + dtoExercise(offset(5), 2L, true, "#4"), + dtoDivulgence(offset(5), 3L, "#4"), + dtoCompletion(offset(5)), + ) + + for { + // Initialize and insert some data + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + _ <- executeSql(ingest(dtos, _)) + _ <- executeSql(backend.updateLedgerEnd(ledgerEnd(5, 3L))) + + // Reset + _ <- executeSql(backend.resetAll) + + // Check the contents (queries that do not depend on ledger end) + identity <- executeSql(backend.ledgerIdentity) + end <- executeSql(backend.ledgerEnd) + events <- executeSql(backend.contractStateEvents(0, Long.MaxValue)) + + // Check the contents (queries that don't read beyond ledger end) + _ <- advanceLedgerEndToMakeOldDataVisible() + parties <- executeSql(backend.knownParties) + config <- executeSql(backend.ledgerConfiguration) + packages <- executeSql(backend.lfPackages) + } yield { + identity shouldBe None + end shouldBe None + parties shouldBe empty + packages shouldBe empty // Note: resetAll() does delete packages + events shouldBe empty + config shouldBe None + } + } + + // Some queries are protected to never return data beyond the current ledger end. + // By advancing the ledger end to a large value, we can check whether these + // queries now find any left-over data not cleaned by reset. + private def advanceLedgerEndToMakeOldDataVisible(): Future[Unit] = { + for { + _ <- executeSql(backend.initializeParameters(someIdentityParams)) + _ <- executeSql(backend.updateLedgerEnd(ledgerEnd(10000, 10000))) + } yield () + } +} diff --git a/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendH2Spec.scala b/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendH2Spec.scala new file mode 100644 index 000000000000..f2e91407aa04 --- /dev/null +++ b/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendH2Spec.scala @@ -0,0 +1,11 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.daml.platform.store.backend + +import org.scalatest.flatspec.AsyncFlatSpec + +final class StorageBackendH2Spec + extends AsyncFlatSpec + with StorageBackendProviderH2 + with StorageBackendSuite diff --git a/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendOracleSpec.scala b/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendOracleSpec.scala new file mode 100644 index 000000000000..8c92441813ec --- /dev/null +++ b/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendOracleSpec.scala @@ -0,0 +1,11 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.daml.platform.store.backend + +import org.scalatest.flatspec.AsyncFlatSpec + +final class StorageBackendOracleSpec + extends AsyncFlatSpec + with StorageBackendProviderOracle + with StorageBackendSuite diff --git a/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendPostgresSpec.scala b/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendPostgresSpec.scala new file mode 100644 index 000000000000..f8eed53711d4 --- /dev/null +++ b/ledger/participant-integration-api/src/test/suite/scala/platform/store/backend/StorageBackendPostgresSpec.scala @@ -0,0 +1,11 @@ +// Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.daml.platform.store.backend + +import org.scalatest.flatspec.AsyncFlatSpec + +final class StorageBackendPostgresSpec + extends AsyncFlatSpec + with StorageBackendProviderPostgres + with StorageBackendSuite
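
Note on extending the suite: each concrete spec above only selects a database provider; all test content comes from the traits mixed into StorageBackendSuite. As a sketch of how a new cross-database test group would plug in (hypothetical and not part of this diff; the trait name StorageBackendTestsParties and its single test are illustrative only):

// Sketch only: follows the pattern of StorageBackendTestsReset above.
package com.daml.platform.store.backend

import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers

private[backend] trait StorageBackendTestsParties extends Matchers with StorageBackendSpec {
  this: AsyncFlatSpec =>

  behavior of "StorageBackend (parties)"

  import StorageBackendTestValues._

  it should "ingest and read back a party allocation" in {
    for {
      _ <- executeSql(backend.initializeParameters(someIdentityParams))
      // Ingest a party and its ledger entry, then advance the ledger end to make them visible
      _ <- executeSql(ingest(Vector(dtoParty(offset(1)), dtoPartyEntry(offset(1))), _))
      _ <- executeSql(backend.updateLedgerEnd(ledgerEnd(1, 0L)))
      parties <- executeSql(backend.knownParties)
    } yield parties should not be empty
  }
}

Because the provider traits fix jdbcUrl and backend, adding such a trait to StorageBackendSuite's mix-in list would run it unchanged against H2, Oracle, and Postgres via the three specs above.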