Skip to content

Commit

Permalink
feat(analysis): Add Dry-Run Mode
Browse files Browse the repository at this point in the history
Signed-off-by: Rohit Agrawal <[email protected]>
  • Loading branch information
agrawroh committed Nov 11, 2021
1 parent f77b2c5 commit 6b756c3
Show file tree
Hide file tree
Showing 15 changed files with 381 additions and 136 deletions.
192 changes: 131 additions & 61 deletions analysis/analysis.go

Large diffs are not rendered by default.

163 changes: 112 additions & 51 deletions analysis/analysis_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,18 +91,20 @@ func newRun() *v1alpha1.AnalysisRun {
}

// newTerminatingRun returns a run which is terminating because of the given status
func newTerminatingRun(status v1alpha1.AnalysisPhase) *v1alpha1.AnalysisRun {
func newTerminatingRun(status v1alpha1.AnalysisPhase, isDryRun bool) *v1alpha1.AnalysisRun {
run := v1alpha1.AnalysisRun{
Spec: v1alpha1.AnalysisRunSpec{
Metrics: []v1alpha1.Metric{
{
Name: "run-forever",
Name: "run-forever",
DryRun: isDryRun,
Provider: v1alpha1.MetricProvider{
Job: &v1alpha1.JobMetric{},
},
},
{
Name: "failed-metric",
Name: "failed-metric",
DryRun: isDryRun,
Provider: v1alpha1.MetricProvider{
Job: &v1alpha1.JobMetric{},
},
Expand All @@ -113,16 +115,18 @@ func newTerminatingRun(status v1alpha1.AnalysisPhase) *v1alpha1.AnalysisRun {
Phase: v1alpha1.AnalysisPhaseRunning,
MetricResults: []v1alpha1.MetricResult{
{
Name: "run-forever",
Phase: v1alpha1.AnalysisPhaseRunning,
Name: "run-forever",
DryRun: isDryRun,
Phase: v1alpha1.AnalysisPhaseRunning,
Measurements: []v1alpha1.Measurement{{
Phase: v1alpha1.AnalysisPhaseRunning,
StartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
}},
},
{
Name: "failed-metric",
Count: 1,
Name: "failed-metric",
Count: 1,
DryRun: isDryRun,
Measurements: []v1alpha1.Measurement{{
Phase: status,
StartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
Expand Down Expand Up @@ -941,7 +945,7 @@ func TestReconcileAnalysisRunTerminateSiblingAfterFail(t *testing.T) {
f.provider.On("Terminate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil)

for _, status := range []v1alpha1.AnalysisPhase{v1alpha1.AnalysisPhaseFailed, v1alpha1.AnalysisPhaseInconclusive, v1alpha1.AnalysisPhaseError} {
run := newTerminatingRun(status)
run := newTerminatingRun(status, false)
newRun := c.reconcileAnalysisRun(run)

assert.Equal(t, status, newRun.Status.Phase)
Expand All @@ -950,8 +954,8 @@ func TestReconcileAnalysisRunTerminateSiblingAfterFail(t *testing.T) {
// ensure the in-progress measurement is now terminated
assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.MetricResults[0].Measurements[0].Phase)
assert.NotNil(t, newRun.Status.MetricResults[0].Measurements[0].FinishedAt)
assert.Equal(t, "metric terminated", newRun.Status.MetricResults[0].Message)
assert.Equal(t, "metric terminated", newRun.Status.MetricResults[0].Measurements[0].Message)
assert.Equal(t, "Metric Terminated", newRun.Status.MetricResults[0].Message)
assert.Equal(t, "Metric Terminated", newRun.Status.MetricResults[0].Measurements[0].Message)
}
}

Expand Down Expand Up @@ -1069,22 +1073,26 @@ func TestResolveMetricArgsUnableToSubstitute(t *testing.T) {
f := newFixture(t)
defer f.Close()
c, _, _ := f.newController(noResyncPeriodFunc)
run := &v1alpha1.AnalysisRun{
Spec: v1alpha1.AnalysisRunSpec{
Metrics: []v1alpha1.Metric{{
Name: "rate",
SuccessCondition: "{{args.does-not-exist}}",
Provider: v1alpha1.MetricProvider{
Prometheus: &v1alpha1.PrometheusMetric{
Query: "{{args.metric-name}}",
// Whether or not it is a dry run, if the args resolution fails then we should fail the analysis
for _, isDryRun := range [3]bool{false, true, false} {
run := &v1alpha1.AnalysisRun{
Spec: v1alpha1.AnalysisRunSpec{
Metrics: []v1alpha1.Metric{{
Name: "rate",
DryRun: isDryRun,
SuccessCondition: "{{args.does-not-exist}}",
Provider: v1alpha1.MetricProvider{
Prometheus: &v1alpha1.PrometheusMetric{
Query: "{{args.metric-name}}",
},
},
},
}},
},
}},
},
}
newRun := c.reconcileAnalysisRun(run)
assert.Equal(t, v1alpha1.AnalysisPhaseError, newRun.Status.Phase)
assert.Equal(t, "Unable to resolve metric arguments: failed to resolve {{args.metric-name}}", newRun.Status.Message)
}
newRun := c.reconcileAnalysisRun(run)
assert.Equal(t, v1alpha1.AnalysisPhaseError, newRun.Status.Phase)
assert.Equal(t, "unable to resolve metric arguments: failed to resolve {{args.metric-name}}", newRun.Status.Message)
}

// TestSecretContentReferenceSuccess verifies that secret arguments are properly resolved
Expand Down Expand Up @@ -1396,72 +1404,114 @@ func TestAssessMetricFailureInconclusiveOrError(t *testing.T) {
assert.Equal(t, phase, assessMetricStatus(metric, result, true))
}

func TestAssessRunStatusErrorMessageAnalysisPhaseFail(t *testing.T) {
func StartAssessRunStatusErrorMessageAnalysisPhaseFail(t *testing.T, isDryRun bool) (v1alpha1.AnalysisPhase, string) {
f := newFixture(t)
defer f.Close()
c, _, _ := f.newController(noResyncPeriodFunc)

run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed)
run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed, isDryRun)
run.Status.MetricResults[0].Phase = v1alpha1.AnalysisPhaseSuccessful
status, message := c.assessRunStatus(run, run.Spec.Metrics)
return c.assessRunStatus(run, run.Spec.Metrics)
}

func TestAssessRunStatusErrorMessageAnalysisPhaseFail(t *testing.T) {
status, message := StartAssessRunStatusErrorMessageAnalysisPhaseFail(t, false)
assert.Equal(t, v1alpha1.AnalysisPhaseFailed, status)
assert.Equal(t, "metric \"failed-metric\" assessed Failed due to failed (1) > failureLimit (0)", message)
assert.Equal(t, "Metric \"failed-metric\" assessed Failed due to failed (1) > failureLimit (0)", message)
}

// TestAssessRunStatusErrorMessageFromProvider verifies that the message returned by assessRunStatus
// includes the error message from the provider
func TestAssessRunStatusErrorMessageFromProvider(t *testing.T) {
// TestAssessRunStatusErrorMessageAnalysisPhaseFailInDryRunMode verifies that a
// failed metric evaluated in dry-run mode does not fail the run: the run is
// assessed Successful and the message carries the dry-run summary instead.
func TestAssessRunStatusErrorMessageAnalysisPhaseFailInDryRunMode(t *testing.T) {
	phase, msg := StartAssessRunStatusErrorMessageAnalysisPhaseFail(t, true)
	expectedMsg := "Dry-Run Summary: Total=2, Successful=1, Failed=1, Inconclusive=0"
	assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, phase)
	assert.Equal(t, expectedMsg, msg)
}

func StartAssessRunStatusErrorMessageFromProvider(t *testing.T, providerMessage string, isDryRun bool) (v1alpha1.AnalysisPhase, string) {
f := newFixture(t)
defer f.Close()
c, _, _ := f.newController(noResyncPeriodFunc)

run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed)
run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed, isDryRun)
run.Status.MetricResults[0].Phase = v1alpha1.AnalysisPhaseSuccessful // All metrics must complete, or assessRunStatus will not return message

providerMessage := "Provider error"
run.Status.MetricResults[1].Message = providerMessage

status, message := c.assessRunStatus(run, run.Spec.Metrics)
expectedMessage := fmt.Sprintf("metric \"failed-metric\" assessed Failed due to failed (1) > failureLimit (0): \"Error Message: %s\"", providerMessage)
return c.assessRunStatus(run, run.Spec.Metrics)
}

// TestAssessRunStatusErrorMessageFromProvider verifies that the message returned
// by assessRunStatus includes the error message reported by the provider.
func TestAssessRunStatusErrorMessageFromProvider(t *testing.T) {
	providerMsg := "Provider Error"
	// The run-level message is expected to embed the provider's error verbatim.
	want := fmt.Sprintf("Metric \"failed-metric\" assessed Failed due to failed (1) > failureLimit (0): \"Error Message: %s\"", providerMsg)
	phase, msg := StartAssessRunStatusErrorMessageFromProvider(t, providerMsg, false)
	assert.Equal(t, v1alpha1.AnalysisPhaseFailed, phase)
	assert.Equal(t, want, msg)
}

// TestAssessRunStatusMultipleFailures verifies that if there are multiple failed metrics, assessRunStatus returns the message
// from the first failed metric
func TestAssessRunStatusMultipleFailures(t *testing.T) {
// TestAssessRunStatusErrorMessageFromProviderInDryRunMode verifies that a provider
// error on a dry-run metric does not fail the run: the run stays Successful and
// the message is the dry-run summary rather than the provider's error.
func TestAssessRunStatusErrorMessageFromProviderInDryRunMode(t *testing.T) {
	phase, msg := StartAssessRunStatusErrorMessageFromProvider(t, "Provider Error", true)
	assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, phase)
	assert.Equal(t, "Dry-Run Summary: Total=2, Successful=1, Failed=1, Inconclusive=0", msg)
}

func StartAssessRunStatusMultipleFailures(t *testing.T, isDryRun bool) (v1alpha1.AnalysisPhase, string) {
f := newFixture(t)
defer f.Close()
c, _, _ := f.newController(noResyncPeriodFunc)

run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed)
run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed, isDryRun)
run.Status.MetricResults[0].Phase = v1alpha1.AnalysisPhaseFailed
run.Status.MetricResults[0].Failed = 1

status, message := c.assessRunStatus(run, run.Spec.Metrics)
return c.assessRunStatus(run, run.Spec.Metrics)
}

// TestAssessRunStatusMultipleFailures verifies that if there are multiple failed metrics, assessRunStatus returns the message
// from the first failed metric
func TestAssessRunStatusMultipleFailures(t *testing.T) {
status, message := StartAssessRunStatusMultipleFailures(t, false)
assert.Equal(t, v1alpha1.AnalysisPhaseFailed, status)
assert.Equal(t, "metric \"run-forever\" assessed Failed due to failed (1) > failureLimit (0)", message)
assert.Equal(t, "Metric \"run-forever\" assessed Failed due to failed (1) > failureLimit (0)", message)
}

// TestAssessRunStatusWorstMessageInReconcileAnalysisRun verifies that the worstMessage returned by assessRunStatus is set as the
// status of the AnalysisRun returned by reconcileAnalysisRun
func TestAssessRunStatusWorstMessageInReconcileAnalysisRun(t *testing.T) {
// TestAssessRunStatusMultipleFailuresInDryRunMode verifies that even with multiple
// failed metrics, a dry-run analysis is assessed Successful and the failures are
// reported only through the dry-run summary message.
func TestAssessRunStatusMultipleFailuresInDryRunMode(t *testing.T) {
	phase, msg := StartAssessRunStatusMultipleFailures(t, true)
	expected := "Dry-Run Summary: Total=2, Successful=0, Failed=2, Inconclusive=0"
	assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, phase)
	assert.Equal(t, expected, msg)
}

func StartAssessRunStatusWorstMessageInReconcileAnalysisRun(t *testing.T, isDryRun bool) *v1alpha1.AnalysisRun {
f := newFixture(t)
defer f.Close()
c, _, _ := f.newController(noResyncPeriodFunc)

run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed)
run := newTerminatingRun(v1alpha1.AnalysisPhaseFailed, isDryRun)
run.Status.MetricResults[0].Phase = v1alpha1.AnalysisPhaseFailed
run.Status.MetricResults[0].Failed = 1

f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseFailed), nil)

newRun := c.reconcileAnalysisRun(run)
return c.reconcileAnalysisRun(run)
}

// TestAssessRunStatusWorstMessageInReconcileAnalysisRun verifies that the worstMessage returned by assessRunStatus is set as the
// status of the AnalysisRun returned by reconcileAnalysisRun
func TestAssessRunStatusWorstMessageInReconcileAnalysisRun(t *testing.T) {
newRun := StartAssessRunStatusWorstMessageInReconcileAnalysisRun(t, false)
assert.Equal(t, v1alpha1.AnalysisPhaseFailed, newRun.Status.Phase)
assert.Equal(t, "metric \"run-forever\" assessed Failed due to failed (1) > failureLimit (0)", newRun.Status.Message)
assert.Equal(t, "Metric \"run-forever\" assessed Failed due to failed (1) > failureLimit (0)", newRun.Status.Message)
}

func TestTerminateAnalysisRun(t *testing.T) {
// TestAssessRunStatusWorstMessageInReconcileAnalysisRunInDryRunMode verifies that in
// dry-run mode the run-level status carries the dry-run summary, while each
// per-metric result still records its individual failure message.
func TestAssessRunStatusWorstMessageInReconcileAnalysisRunInDryRunMode(t *testing.T) {
	run := StartAssessRunStatusWorstMessageInReconcileAnalysisRun(t, true)
	assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, run.Status.Phase)
	assert.Equal(t, "Dry-Run Summary: Total=2, Successful=0, Failed=2, Inconclusive=0", run.Status.Message)
	// Both metrics in the terminating fixture fail, so both results share the same message.
	metricFailureMsg := "Metric assessed Failed due to failed (1) > failureLimit (0)"
	assert.Equal(t, metricFailureMsg, run.Status.MetricResults[0].Message)
	assert.Equal(t, metricFailureMsg, run.Status.MetricResults[1].Message)
}

func StartTerminatingAnalysisRun(t *testing.T, isDryRun bool) *v1alpha1.AnalysisRun {
f := newFixture(t)
defer f.Close()
c, _, _ := f.newController(noResyncPeriodFunc)
Expand All @@ -1480,6 +1530,7 @@ func TestTerminateAnalysisRun(t *testing.T) {
},
Metrics: []v1alpha1.Metric{{
Name: "success-rate",
DryRun: isDryRun,
InitialDelay: "20s",
Interval: "20s",
SuccessCondition: "result[0] > 0.90",
Expand All @@ -1493,7 +1544,17 @@ func TestTerminateAnalysisRun(t *testing.T) {
Phase: v1alpha1.AnalysisPhaseRunning,
},
}
newRun := c.reconcileAnalysisRun(run)
return c.reconcileAnalysisRun(run)
}

// TestTerminateAnalysisRun verifies that a terminated (non-dry-run) analysis run
// completes as Successful with the plain "Run Terminated" status message.
func TestTerminateAnalysisRun(t *testing.T) {
	run := StartTerminatingAnalysisRun(t, false)
	assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, run.Status.Phase)
	assert.Equal(t, "Run Terminated", run.Status.Message)
}

func TestTerminateAnalysisRunInDryRunMode(t *testing.T) {
newRun := StartTerminatingAnalysisRun(t, true)
assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.Phase)
assert.Equal(t, "run terminated", newRun.Status.Message)
assert.Equal(t, "Run Terminated; Dry-Run Summary: Total=1, Successful=0, Failed=0, Inconclusive=0", newRun.Status.Message)
}
18 changes: 10 additions & 8 deletions controller/metrics/analysis.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
package metrics

import (
"fmt"

"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/labels"
Expand Down Expand Up @@ -83,17 +85,17 @@ func collectAnalysisRuns(ch chan<- prometheus.Metric, ar *v1alpha1.AnalysisRun)
for _, metric := range ar.Spec.Metrics {
metricType := metricproviders.Type(metric)
metricResult := analysis.GetResult(ar, metric.Name)
addGauge(MetricAnalysisRunMetricType, 1, metric.Name, metricType)
addGauge(MetricAnalysisRunMetricType, 1, metric.Name, metricType, fmt.Sprint(metric.DryRun))
calculatedPhase := v1alpha1.AnalysisPhase("")
if metricResult != nil {
calculatedPhase = metricResult.Phase
}
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhasePending || calculatedPhase == ""), metric.Name, metricType, string(v1alpha1.AnalysisPhasePending))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseError), metric.Name, metricType, string(v1alpha1.AnalysisPhaseError))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseFailed), metric.Name, metricType, string(v1alpha1.AnalysisPhaseFailed))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseSuccessful), metric.Name, metricType, string(v1alpha1.AnalysisPhaseSuccessful))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseRunning), metric.Name, metricType, string(v1alpha1.AnalysisPhaseRunning))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseInconclusive), metric.Name, metricType, string(v1alpha1.AnalysisPhaseInconclusive))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhasePending || calculatedPhase == ""), metric.Name, metricType, fmt.Sprint(metric.DryRun), string(v1alpha1.AnalysisPhasePending))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseError), metric.Name, metricType, fmt.Sprint(metric.DryRun), string(v1alpha1.AnalysisPhaseError))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseFailed), metric.Name, metricType, fmt.Sprint(metric.DryRun), string(v1alpha1.AnalysisPhaseFailed))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseSuccessful), metric.Name, metricType, fmt.Sprint(metric.DryRun), string(v1alpha1.AnalysisPhaseSuccessful))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseRunning), metric.Name, metricType, fmt.Sprint(metric.DryRun), string(v1alpha1.AnalysisPhaseRunning))
addGauge(MetricAnalysisRunMetricPhase, boolFloat64(calculatedPhase == v1alpha1.AnalysisPhaseInconclusive), metric.Name, metricType, fmt.Sprint(metric.DryRun), string(v1alpha1.AnalysisPhaseInconclusive))
}
}

Expand All @@ -106,6 +108,6 @@ func collectAnalysisTemplate(ch chan<- prometheus.Metric, namespace, name string

for _, metric := range at.Metrics {
metricType := metricproviders.Type(metric)
addGauge(MetricAnalysisTemplateMetricInfo, 1, metricType, metric.Name)
addGauge(MetricAnalysisTemplateMetricInfo, 1, metricType, metric.Name, fmt.Sprint(metric.DryRun))
}
}
Loading

0 comments on commit 6b756c3

Please sign in to comment.