Skip to content

Commit

Permalink
refactor: rework existing JobBackoffMigration tests
Browse files Browse the repository at this point in the history
The previous tests directly used the column families to set up and update values,
which is error-prone: the underlying column families or the way they are used can change
without being reflected in these tests. That means the tests will not fail when they should, as we saw.

This commit couples the tests with the actual JobState in order to set up the column family
data correctly and update it in the expected way.

This way the migration can also work properly, and we can verify whether the cleanup
works correctly or not.

(cherry picked from commit 9c0f534)
  • Loading branch information
ChrisKujawa authored and github-actions[bot] committed Nov 30, 2023
1 parent 6719061 commit f6f2b03
Showing 1 changed file with 33 additions and 41 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import io.camunda.zeebe.engine.util.ProcessingStateExtension;
import io.camunda.zeebe.protocol.ZbColumnFamilies;
import io.camunda.zeebe.protocol.impl.record.value.job.JobRecord;
import java.util.ArrayList;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
Expand Down Expand Up @@ -60,47 +61,30 @@ public void setup() {
jobKey.wrapLong(1);
}

// regression test of https://github.com/camunda/zeebe/issues/14329
// Verifies that a backoff entry whose key matches a real job deadline survives the
// cleanup migration, i.e. the migration must only remove invalid/orphaned entries.
// NOTE(review): this test writes directly into the column families instead of going
// through JobState — the commit message flags this as error-prone, since the state
// layer may diverge from what is seeded here without the test noticing.
@Test
public void afterCleanupValidTimeoutIsStillPresent() {
// given: a job record whose retry backoff is 123, plus a backoff column-family
// entry keyed by the same value, so the entry is "valid" from the migration's view
final int deadline = 123;
jobsColumnFamily.upsert(jobKey, createJobRecordValue(deadline));
backoffKey.wrapLong(deadline);
backoffColumnFamily.upsert(backoffJobKey, DbNil.INSTANCE);

// when: running the cleanup migration over the seeded state
jobBackoffCleanupMigration.runMigration(processingState);

// then: the valid backoff entry must still be present
assertThat(backoffColumnFamily.exists(backoffJobKey)).isTrue();
}

@Test
public void afterCleanupOrphanedBackoffIsDeleted() {
public void shoulCleanOrphanBackoffEntries() {
// given
jobsColumnFamily.upsert(jobKey, new JobRecordValue());
backoffKey.wrapLong(123);
backoffColumnFamily.upsert(backoffJobKey, DbNil.INSTANCE);
final MutableJobState jobState = processingState.getJobState();
final JobRecord record = createJobRecord(1000);
jobState.create(jobKey.getValue(), record);
jobState.fail(jobKey.getValue(), record);
jobsColumnFamily.deleteExisting(jobKey);

// when
jobBackoffCleanupMigration.runMigration(processingState);

// then
assertThat(backoffColumnFamily.exists(backoffJobKey)).isFalse();
assertThat(backoffColumnFamily.isEmpty()).isTrue();
}

// regression test of https://github.com/camunda/zeebe/issues/14329
@Test
public void shouldNotCleanUpFailedJobs() {
// given
final MutableJobState jobState = processingState.getJobState();
final JobRecord record = new JobRecord();
record.setType("test");
final JobRecord record = createJobRecord(1000);
jobState.create(jobKey.getValue(), record);
record.setRetries(3);
record.setRetryBackoff(1000);
record.setRecurringTime(System.currentTimeMillis() + 1000);
jobState.fail(jobKey.getValue(), record);

// when
Expand All @@ -110,30 +94,38 @@ public void shouldNotCleanUpFailedJobs() {
assertThat(backoffColumnFamily.isEmpty()).isFalse();
}

// regression test of https://github.com/camunda/zeebe/issues/14329
@Test
public void afterCleanupTimeoutWithNonMatchingRetryBackoffIsDeleted() {
public void shoulCleanDuplicatedBackoffEntries() {
// given
final int firstRetryBackoff = 123;
final int secondRetryBackoff = 456;
jobsColumnFamily.upsert(jobKey, createJobRecordValue(secondRetryBackoff));
backoffKey.wrapLong(firstRetryBackoff);
backoffColumnFamily.upsert(backoffJobKey, DbNil.INSTANCE);
backoffKey.wrapLong(secondRetryBackoff);
backoffColumnFamily.upsert(backoffJobKey, DbNil.INSTANCE);
final MutableJobState jobState = processingState.getJobState();
final JobRecord record = createJobRecord(1000);
jobState.create(jobKey.getValue(), record);
jobState.fail(jobKey.getValue(), record);

// second fail will cause duplicate entry and orphan the first backoff
record.setRecurringTime(System.currentTimeMillis() + 1001);
jobState.fail(jobKey.getValue(), record);

// when
jobBackoffCleanupMigration.runMigration(processingState);

// then
backoffKey.wrapLong(firstRetryBackoff);
assertThat(backoffColumnFamily.exists(backoffJobKey)).isFalse();
backoffKey.wrapLong(secondRetryBackoff);
assertThat(backoffColumnFamily.exists(backoffJobKey)).isTrue();
assertThat(backoffColumnFamily.isEmpty()).isFalse();
final var keys = new ArrayList<DbCompositeKey<DbLong, DbForeignKey<DbLong>>>();
backoffColumnFamily.forEach((k, v) -> keys.add(k));
assertThat(keys).hasSize(1);
assertThat(keys)
.extracting(DbCompositeKey::second)
.extracting(DbForeignKey::inner)
.contains(jobKey);
}

private static JobRecordValue createJobRecordValue(final long retryBackoff) {
final JobRecordValue jobRecordValue = new JobRecordValue();
jobRecordValue.setRecordWithoutVariables(new JobRecord().setRetryBackoff(retryBackoff));
return jobRecordValue;
private static JobRecord createJobRecord(final long retryBackoff) {
return new JobRecord()
.setType("test")
.setRetries(3)
.setRetryBackoff(retryBackoff)
.setRecurringTime(System.currentTimeMillis() + retryBackoff);
}
}

0 comments on commit f6f2b03

Please sign in to comment.