REPLAY-1963 Refactor context acquisition
maciejburda committed Sep 11, 2023
1 parent a693cb7 commit 79785d7
Showing 16 changed files with 194 additions and 169 deletions.
5 changes: 1 addition & 4 deletions DatadogCore/Sources/Core/DatadogCore.swift
@@ -319,10 +319,7 @@ internal struct DatadogCoreFeatureScope: FeatureScope {
// On user thread: request SDK context.
contextProvider.read { context in
// On context thread: request writer for current tracking consent.
- let writer = storage.writer(
-     for: bypassConsent ? .granted : context.trackingConsent,
-     forceNewBatch: forceNewBatch
- )
+ let writer = storage.writer(for: context, bypassConsent: bypassConsent, forceNewBatch: forceNewBatch)

// Still on context thread: send `Writer` to EWC caller. The writer implements `AsyncWriter`, so
// the implementation of `writer.write(value:)` will run asynchronously without blocking the context thread.
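A minimal sketch of the refactored call site (not part of this commit; `storage`, `contextProvider`, and `event` are assumed to be in scope), using only APIs visible in this diff:

    contextProvider.read { context in
        // FeatureStorage now resolves `bypassConsent ? .granted : context.trackingConsent` itself.
        let writer = storage.writer(for: context, bypassConsent: false, forceNewBatch: false)
        // The returned AsyncWriter performs the write off the context thread.
        writer.write(value: event)
    }
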
16 changes: 10 additions & 6 deletions DatadogCore/Sources/Core/Storage/FeatureStorage.swift
@@ -23,15 +23,20 @@ internal struct FeatureStorage {
/// Telemetry interface.
let telemetry: Telemetry

- func writer(for trackingConsent: TrackingConsent, forceNewBatch: Bool) -> Writer {
-     switch trackingConsent {
+ func writer(
+     for context: DatadogContext,
+     bypassConsent: Bool = false,
+     forceNewBatch: Bool = false
+ ) -> Writer {
+     switch bypassConsent ? .granted : context.trackingConsent {
case .granted:
return AsyncWriter(
execute: FileWriter(
orchestrator: authorizedFilesOrchestrator,
forceNewFile: forceNewBatch,
encryption: encryption,
- telemetry: telemetry
+ telemetry: telemetry,
+ context: context
),
on: queue
)
@@ -43,7 +48,8 @@
orchestrator: unauthorizedFilesOrchestrator,
forceNewFile: forceNewBatch,
encryption: encryption,
- telemetry: telemetry
+ telemetry: telemetry,
+ context: context
),
on: queue
)
@@ -125,7 +131,6 @@ extension FeatureStorage {
directory: directories.authorized,
performance: performance,
dateProvider: dateProvider,
- contextProvider: contextProvider,
telemetry: telemetry,
metricsData: {
guard let trackName = BatchMetric.trackValue(for: featureName) else {
@@ -139,7 +144,6 @@
directory: directories.unauthorized,
performance: performance,
dateProvider: dateProvider,
- contextProvider: contextProvider,
telemetry: telemetry,
metricsData: nil // do not send metrics for unauthorized orchestrator
)
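A hypothetical exercise of the new entry point (the `.mockAny()` fixture and surrounding bindings are assumptions, not part of this commit), showing how consent selection moved inside the storage:

    var context: DatadogContext = .mockAny() // assumption: a test fixture like this exists
    context.trackingConsent = .pending
    let pendingWriter = storage.writer(for: context)                      // expected: unauthorized storage, per the second hunk above
    let grantedWriter = storage.writer(for: context, bypassConsent: true) // consent forced to .granted
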
74 changes: 46 additions & 28 deletions DatadogCore/Sources/Core/Storage/FilesOrchestrator.swift
@@ -10,10 +10,14 @@ import DatadogInternal
internal protocol FilesOrchestratorType: AnyObject {
var performance: StoragePerformancePreset { get }

- func getNewWritableFile(writeSize: UInt64) throws -> WritableFile
- func getWritableFile(writeSize: UInt64) throws -> WritableFile
- func getReadableFile(excludingFilesNamed excludedFileNames: Set<String>) -> ReadableFile?
- func delete(readableFile: ReadableFile, deletionReason: BatchDeletedMetric.RemovalReason)
+ func getNewWritableFile(writeSize: UInt64, context: DatadogContext) throws -> WritableFile
+ func getWritableFile(writeSize: UInt64, context: DatadogContext) throws -> WritableFile
+ func getReadableFile(excludingFilesNamed excludedFileNames: Set<String>, context: DatadogContext) -> ReadableFile?
+ func delete(
+     readableFile: ReadableFile,
+     deletionReason: BatchDeletedMetric.RemovalReason,
+     context: DatadogContext
+ )

var ignoreFilesAgeWhenReading: Bool { get set }
}
@@ -37,12 +41,6 @@ internal class FilesOrchestrator: FilesOrchestratorType {
/// Telemetry interface.
let telemetry: Telemetry

- /// Flag indicating if the application is in background state.
- var inBackground: Bool {
-     return contextProvider.read().applicationStateHistory.currentSnapshot.state == .background
- }
- let contextProvider: DatadogContextProvider

/// Extra information for metrics set from this orchestrator.
struct MetricsData {
/// The name of the track reported for this orchestrator.
@@ -58,14 +56,12 @@
directory: Directory,
performance: StoragePerformancePreset,
dateProvider: DateProvider,
- contextProvider: DatadogContextProvider,
telemetry: Telemetry,
metricsData: MetricsData? = nil
) {
self.directory = directory
self.performance = performance
self.dateProvider = dateProvider
- self.contextProvider = contextProvider
self.telemetry = telemetry
self.metricsData = metricsData
}
@@ -79,19 +75,19 @@
///
/// - Parameter writeSize: the size of data to be written
/// - Returns: `WritableFile` capable of writing data of given size
- func getNewWritableFile(writeSize: UInt64) throws -> WritableFile {
+ func getNewWritableFile(writeSize: UInt64, context: DatadogContext) throws -> WritableFile {
try validate(writeSize: writeSize)
if let closedBatchName = lastWritableFileName {
sendBatchClosedMetric(fileName: closedBatchName, forcedNew: true)
}
- return try createNewWritableFile(writeSize: writeSize)
+ return try createNewWritableFile(writeSize: writeSize, context: context)
}

/// Returns writable file accordingly to default heuristic of creating and reusing files.
///
/// - Parameter writeSize: the size of data to be written
/// - Returns: `WritableFile` capable of writing data of given size
- func getWritableFile(writeSize: UInt64) throws -> WritableFile {
+ func getWritableFile(writeSize: UInt64, context: DatadogContext) throws -> WritableFile {
try validate(writeSize: writeSize)

if let lastWritableFile = reuseLastWritableFileIfPossible(writeSize: writeSize) { // if last writable file can be reused
@@ -102,7 +98,7 @@
if let closedBatchName = lastWritableFileName {
sendBatchClosedMetric(fileName: closedBatchName, forcedNew: false)
}
- return try createNewWritableFile(writeSize: writeSize)
+ return try createNewWritableFile(writeSize: writeSize, context: context)
}
}

@@ -112,13 +108,13 @@
}
}

- private func createNewWritableFile(writeSize: UInt64) throws -> WritableFile {
+ private func createNewWritableFile(writeSize: UInt64, context: DatadogContext) throws -> WritableFile {
// NOTE: RUMM-610 Because purging files directory is a memory-expensive operation, do it only when a new file
// is created (we assume here that this won't happen too often). In details, this is to avoid over-allocating
// internal `_FileCache` and `_NSFastEnumerationEnumerator` objects in downstream `FileManager` routines.
// This optimisation results with flat allocation graph in a long term (vs endlessly growing if purging
// happens too often).
- try purgeFilesDirectoryIfNeeded()
+ try purgeFilesDirectoryIfNeeded(context: context)

let newFileName = fileNameFrom(fileCreationDate: dateProvider.now)
let newFile = try directory.createFile(named: newFileName)
@@ -156,11 +152,16 @@

// MARK: - `ReadableFile` orchestration

- func getReadableFile(excludingFilesNamed excludedFileNames: Set<String> = []) -> ReadableFile? {
+ func getReadableFile(
+     excludingFilesNamed excludedFileNames: Set<String> = [],
+     context: DatadogContext
+ ) -> ReadableFile? {
do {
let filesWithCreationDate = try directory.files()
.map { (file: $0, creationDate: fileCreationDateFrom(fileName: $0.name)) }
-     .compactMap { try deleteFileIfItsObsolete(file: $0.file, fileCreationDate: $0.creationDate) }
+     .compactMap {
+         try deleteFileIfItsObsolete(file: $0.file, fileCreationDate: $0.creationDate, context: context)
+     }

guard let (oldestFile, creationDate) = filesWithCreationDate
.filter({ excludedFileNames.contains($0.file.name) == false })
@@ -186,10 +187,18 @@
}
}

- func delete(readableFile: ReadableFile, deletionReason: BatchDeletedMetric.RemovalReason) {
+ func delete(
+     readableFile: ReadableFile,
+     deletionReason: BatchDeletedMetric.RemovalReason,
+     context: DatadogContext
+ ) {
do {
try readableFile.delete()
-     sendBatchDeletedMetric(batchFile: readableFile, deletionReason: deletionReason)
+     sendBatchDeletedMetric(
+         batchFile: readableFile,
+         deletionReason: deletionReason,
+         context: context
+     )
} catch {
telemetry.error("Failed to delete file", error: error)
}
@@ -201,7 +210,7 @@
// MARK: - Directory size management

/// Removes oldest files from the directory if it becomes too big.
- private func purgeFilesDirectoryIfNeeded() throws {
+ private func purgeFilesDirectoryIfNeeded(context: DatadogContext) throws {
let filesSortedByCreationDate = try directory.files()
.map { (file: $0, creationDate: fileCreationDateFrom(fileName: $0.name)) }
.sorted { $0.creationDate < $1.creationDate }
@@ -218,18 +227,22 @@
while sizeFreed < sizeToFree && !filesWithSizeSortedByCreationDate.isEmpty {
let fileWithSize = filesWithSizeSortedByCreationDate.removeFirst()
try fileWithSize.file.delete()
-     sendBatchDeletedMetric(batchFile: fileWithSize.file, deletionReason: .purged)
+     sendBatchDeletedMetric(
+         batchFile: fileWithSize.file,
+         deletionReason: .purged,
+         context: context
+     )
sizeFreed += fileWithSize.size
}
}
}

- private func deleteFileIfItsObsolete(file: File, fileCreationDate: Date) throws -> (file: File, creationDate: Date)? {
+ private func deleteFileIfItsObsolete(file: File, fileCreationDate: Date, context: DatadogContext) throws -> (file: File, creationDate: Date)? {
private func deleteFileIfItsObsolete(file: File, fileCreationDate: Date, context: DatadogContext) throws -> (file: File, creationDate: Date)? {
let fileAge = dateProvider.now.timeIntervalSince(fileCreationDate)

if fileAge > performance.maxFileAgeForRead {
try file.delete()
sendBatchDeletedMetric(batchFile: file, deletionReason: .obsolete)
sendBatchDeletedMetric(batchFile: file, deletionReason: .obsolete, context: context)
return nil
} else {
return (file: file, creationDate: fileCreationDate)
@@ -244,13 +257,18 @@
/// - deletionReason: The reason of deleting this file.
///
/// Note: The `batchFile` doesn't exist at this point.
- private func sendBatchDeletedMetric(batchFile: ReadableFile, deletionReason: BatchDeletedMetric.RemovalReason) {
+ private func sendBatchDeletedMetric(
+     batchFile: ReadableFile,
+     deletionReason: BatchDeletedMetric.RemovalReason,
+     context: DatadogContext
+ ) {
guard let metricsData = metricsData, deletionReason.includeInMetric else {
return // do not track metrics for this orchestrator or deletion reason
}

let batchAge = dateProvider.now.timeIntervalSince(fileCreationDateFrom(fileName: batchFile.name))

+ let inBackground = context.applicationStateHistory.currentSnapshot.state == .background
+ print("👾 In background: \(inBackground)")
telemetry.metric(
name: BatchDeletedMetric.name,
attributes: [
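Sketch of the resulting call chain (assuming an `orchestrator: FilesOrchestratorType` and a `contextProvider` in scope, with an illustrative 202 response code): the context is read once at the boundary and threaded down, instead of the orchestrator reading it from a stored provider:

    let context = contextProvider.read() // one synchronous read at the call boundary
    if let file = orchestrator.getReadableFile(excludingFilesNamed: [], context: context) {
        // ... upload the batch, then delete it using the same context snapshot ...
        orchestrator.delete(readableFile: file, deletionReason: .intakeCode(responseCode: 202), context: context)
    }
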
9 changes: 5 additions & 4 deletions DatadogCore/Sources/Core/Storage/Reading/DataReader.swift
@@ -5,6 +5,7 @@
*/

import Foundation
+ import DatadogInternal

/// Synchronizes the work of `FileReader` on given read/write queue.
internal final class DataReader: Reader {
@@ -17,15 +18,15 @@ internal final class DataReader: Reader {
self.fileReader = fileReader
}

- func readNextBatch() -> Batch? {
+ func readNextBatch(context: DatadogContext) -> Batch? {
queue.sync {
-     self.fileReader.readNextBatch()
+     self.fileReader.readNextBatch(context: context)
}
}

- func markBatchAsRead(_ batch: Batch, reason: BatchDeletedMetric.RemovalReason) {
+ func markBatchAsRead(_ batch: Batch, reason: BatchDeletedMetric.RemovalReason, context: DatadogContext) {
queue.sync {
-     self.fileReader.markBatchAsRead(batch, reason: reason)
+     self.fileReader.markBatchAsRead(batch, reason: reason, context: context)
}
}
}
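`DataReader` still funnels every call through its serial queue; only the signatures changed. A usage sketch (the `readWriteQueue:` initializer label is an assumption inferred from the stored properties above, not shown in this diff):

    let reader: Reader = DataReader(readWriteQueue: queue, fileReader: fileReader)
    if let batch = reader.readNextBatch(context: context) {
        // Both calls execute synchronously on the reader's queue, so this is thread-safe.
        reader.markBatchAsRead(batch, reason: .flushed, context: context)
    }
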
8 changes: 4 additions & 4 deletions DatadogCore/Sources/Core/Storage/Reading/FileReader.swift
@@ -30,8 +30,8 @@ internal final class FileReader: Reader {

// MARK: - Reading batches

- func readNextBatch() -> Batch? {
-     guard let file = orchestrator.getReadableFile(excludingFilesNamed: filesRead) else {
+ func readNextBatch(context: DatadogContext) -> Batch? {
+     guard let file = orchestrator.getReadableFile(excludingFilesNamed: filesRead, context: context) else {
return nil
}

@@ -95,8 +95,8 @@

// MARK: - Accepting batches

- func markBatchAsRead(_ batch: Batch, reason: BatchDeletedMetric.RemovalReason) {
-     orchestrator.delete(readableFile: batch.file, deletionReason: reason)
+ func markBatchAsRead(_ batch: Batch, reason: BatchDeletedMetric.RemovalReason, context: DatadogContext) {
+     orchestrator.delete(readableFile: batch.file, deletionReason: reason, context: context)
filesRead.insert(batch.file.name)
}
}
4 changes: 2 additions & 2 deletions DatadogCore/Sources/Core/Storage/Reading/Reader.swift
@@ -24,6 +24,6 @@ extension Batch {

/// A type, reading batched data.
internal protocol Reader {
- func readNextBatch() -> Batch?
- func markBatchAsRead(_ batch: Batch, reason: BatchDeletedMetric.RemovalReason)
+ func readNextBatch(context: DatadogContext) -> Batch?
+ func markBatchAsRead(_ batch: Batch, reason: BatchDeletedMetric.RemovalReason, context: DatadogContext)
}
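Both requirements now take the context by value, which keeps `Reader` easy to stub. A hypothetical no-op conformance (not in this commit) illustrating the new shape:

    internal struct NOPReader: Reader {
        func readNextBatch(context: DatadogContext) -> Batch? { nil }
        func markBatchAsRead(_ batch: Batch, reason: BatchDeletedMetric.RemovalReason, context: DatadogContext) {}
    }
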
10 changes: 8 additions & 2 deletions DatadogCore/Sources/Core/Storage/Writing/FileWriter.swift
@@ -20,17 +20,21 @@ internal struct FileWriter: Writer {
let forceNewFile: Bool
/// Telemetry interface.
let telemetry: Telemetry
+ /// Current context of the SDK.
+ let context: DatadogContext

init(
orchestrator: FilesOrchestratorType,
forceNewFile: Bool,
encryption: DataEncryption?,
-     telemetry: Telemetry
+     telemetry: Telemetry,
+     context: DatadogContext
) {
self.orchestrator = orchestrator
self.encryption = encryption
self.forceNewFile = forceNewFile
self.telemetry = telemetry
+ self.context = context
}

// MARK: - Writing data
@@ -55,7 +59,9 @@
// This is to avoid a situation where event is written to one file and event metadata to another.
// If this happens, the reader will not be able to match event with its metadata.
let writeSize = UInt64(encoded.count)
- let file = try forceNewFile ? orchestrator.getNewWritableFile(writeSize: writeSize) : orchestrator.getWritableFile(writeSize: writeSize)
+ let file = try forceNewFile
+     ? orchestrator.getNewWritableFile(writeSize: writeSize, context: context)
+     : orchestrator.getWritableFile(writeSize: writeSize, context: context)
try file.append(data: encoded)
} catch {
DD.logger.error("Failed to write data", error: error)
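Because `FileWriter` now captures the context it was created with, `FeatureStorage.writer(for:bypassConsent:forceNewBatch:)` builds a fresh writer per event-write call (the "EWC" mentioned in the first file) rather than reusing one across consent changes. A construction sketch, assuming the orchestrator, telemetry, and context values in scope:

    let writer = FileWriter(
        orchestrator: authorizedFilesOrchestrator, // assumption: the consent-granted orchestrator
        forceNewFile: false,
        encryption: nil, // encryption is optional
        telemetry: telemetry,
        context: context
    )
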
15 changes: 10 additions & 5 deletions DatadogCore/Sources/Core/Upload/DataUploadWorker.swift
@@ -66,7 +66,7 @@ internal class DataUploadWorker: DataUploadWorkerType {
let context = contextProvider.read()
let blockersForUpload = self.uploadConditions.blockersForUpload(with: context)
let isSystemReady = blockersForUpload.isEmpty
- let nextBatch = isSystemReady ? self.fileReader.readNextBatch() : nil
+ let nextBatch = isSystemReady ? self.fileReader.readNextBatch(context: context) : nil
if let batch = nextBatch {
self.backgroundTaskCoordinator?.beginBackgroundTask()
DD.logger.debug("⏳ (\(self.featureName)) Uploading batch...")
@@ -84,7 +84,11 @@

DD.logger.debug(" → (\(self.featureName)) not delivered, will be retransmitted: \(uploadStatus.userDebugDescription)")
} else {
- self.fileReader.markBatchAsRead(batch, reason: .intakeCode(responseCode: uploadStatus.responseCode ?? -1)) // -1 is unexpected here
+ self.fileReader.markBatchAsRead(
+     batch,
+     reason: .intakeCode(responseCode: uploadStatus.responseCode ?? -1),
+     context: context
+ ) // -1 is unexpected here
self.delay.decrease()

DD.logger.debug(" → (\(self.featureName)) accepted, won't be retransmitted: \(uploadStatus.userDebugDescription)")
@@ -101,7 +105,7 @@
}
} catch let error {
// If upload can't be initiated do not retry, so drop the batch:
- self.fileReader.markBatchAsRead(batch, reason: .invalid)
+ self.fileReader.markBatchAsRead(batch, reason: .invalid, context: context)
telemetry.error("Failed to initiate '\(self.featureName)' data upload", error: error)
}
} else {
@@ -132,12 +136,13 @@
/// - It performs arbitrary upload (without checking upload condition and without re-transmitting failed uploads).
internal func flushSynchronously() {
queue.sync {
- while let nextBatch = self.fileReader.readNextBatch() {
+ let context = contextProvider.read()
+ while let nextBatch = self.fileReader.readNextBatch(context: context) {
defer {
// RUMM-3459 Delete the underlying batch with `.flushed` reason that will be ignored in reported
// metrics or telemetry. This is legitimate as long as `flush()` routine is only available for testing
// purposes and never run in production apps.
-     self.fileReader.markBatchAsRead(nextBatch, reason: .flushed)
+     self.fileReader.markBatchAsRead(nextBatch, reason: .flushed, context: context)
}
do {
// Try uploading the batch and do one more retry on failure.