diff --git a/aitelemetry/telemetrywrapper.go b/aitelemetry/telemetrywrapper.go
index 36c59c212a..7caacf2979 100644
--- a/aitelemetry/telemetrywrapper.go
+++ b/aitelemetry/telemetrywrapper.go
@@ -27,6 +27,7 @@ const (
 	azurePublicCloudStr        = "AzurePublicCloud"
 	hostNameKey                = "hostname"
 	defaultTimeout             = 10
+	maxCloseTimeoutInSeconds   = 30
 	defaultBatchIntervalInSecs = 15
 	defaultBatchSizeInBytes    = 32768
 	defaultGetEnvRetryCount    = 5
@@ -330,8 +331,35 @@ func (th *telemetryHandle) Close(timeout int) {
 		timeout = defaultTimeout
 	}
 
+	// max wait is the larger of the timeout and maxCloseTimeoutInSeconds
+	maxWaitTimeInSeconds := timeout
+	if maxWaitTimeInSeconds < maxCloseTimeoutInSeconds {
+		maxWaitTimeInSeconds = maxCloseTimeoutInSeconds
+	}
+
 	// wait for items to be sent otherwise timeout
-	<-th.client.Channel().Close(time.Duration(timeout) * time.Second)
+	// similar to the example in the appinsights-go repo: https://github.com/microsoft/ApplicationInsights-Go#shutdown
+	timer := time.NewTimer(time.Duration(maxWaitTimeInSeconds) * time.Second)
+	defer timer.Stop()
+	select {
+	case <-th.client.Channel().Close(time.Duration(timeout) * time.Second):
+		// timeout specified for retries.
+
+		// If we got here, then all telemetry was submitted
+		// successfully, and we can proceed to exiting.
+
+	case <-timer.C:
+		// absolute timeout. This covers any
+		// previous telemetry submission that may not have
+		// completed before Close was called.
+
+		// There are a number of reasons we could have
+		// reached here. We gave it a go, but telemetry
+		// submission failed somewhere. Perhaps old events
+		// were still retrying, or perhaps we're throttled.
+		// Either way, we don't want to wait around for it
+		// to complete, so let's just exit.
+	}
 
 	// Remove diganostic message listener
 	if th.diagListener != nil {
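
For context, the change follows the shutdown pattern from the linked appinsights-go README: wait on the channel's `Close` result, but bound the wait with an absolute backstop timer. Below is a minimal, self-contained Go sketch of that pattern under stated assumptions; `flushTelemetry`, the durations, and the printed messages are illustrative stand-ins (not part of this PR), with `flushTelemetry` playing the role of `th.client.Channel().Close(...)`.

```go
package main

import (
	"fmt"
	"time"
)

// flushTelemetry is an illustrative stand-in for th.client.Channel().Close(...):
// it returns a channel that is closed once pending items have been submitted
// (or retries exhausted). The simulated delay below is arbitrary.
func flushTelemetry(retryTimeout time.Duration) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond) // pretend the flush finishes quickly
		close(done)
	}()
	return done
}

func main() {
	// Values mirroring the diff: the caller's retry timeout and the
	// 30-second absolute backstop (maxCloseTimeoutInSeconds).
	retryTimeout := 10 * time.Second
	maxWait := retryTimeout
	if maxWait < 30*time.Second {
		maxWait = 30 * time.Second // never let the backstop undercut the retry timeout
	}

	timer := time.NewTimer(maxWait)
	defer timer.Stop()

	select {
	case <-flushTelemetry(retryTimeout):
		// All pending telemetry was submitted within the retry timeout.
		fmt.Println("telemetry flushed cleanly")
	case <-timer.C:
		// Absolute backstop fired; stop waiting and shut down anyway.
		fmt.Println("gave up waiting for telemetry flush")
	}
}
```

One design note on the diff itself: it uses `time.NewTimer` with a deferred `Stop` rather than the README's `time.After`, so the backstop timer is released promptly when the flush completes first instead of lingering until it fires.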