diff --git a/extension/storage/filestorage/README.md b/extension/storage/filestorage/README.md index 8650f963542a..127e486b74bd 100644 --- a/extension/storage/filestorage/README.md +++ b/extension/storage/filestorage/README.md @@ -12,14 +12,48 @@ The default directory is `%ProgramData%\Otelcol\FileStorage` on Windows and `/va `timeout` is the maximum time to wait for a file lock. This value does not need to be modified in most circumstances. The default timeout is `1s`. -`compaction` defines how and when files should be compacted. -For now only compaction on start of the collector is supported, and can be enabled by `compaction.on_start` option. +## Compaction +`compaction` defines how and when files should be compacted. There are two modes of compaction available (both of which can be set concurrently): +- `compaction.on_start`, which happens when collector starts +- `compaction.on_rebound`, which happens online when certain criteria are met; it's discussed in more detail below -`compaction.directory` is the directory used for compaction (as midstep). +`compaction.directory` specifies the directory used for compaction (as a midstep). `compaction.max_transaction_size` defines maximum size of the compaction transaction. A value of zero will ignore transaction sizes. +### Rebound (online) compaction + +For rebound compaction, there are two additional parameters available: +- `compaction.rebound_size_below_mib` - specifies the maximum size of actually allocated data for compaction to happen +- `compaction.rebound_total_size_above_mib` - specifies the minimum overall size of the allocated space (both actually used and free pages) + +The idea behind rebound compaction is that in certain workloads (e.g. [persistent queue](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/exporterhelper#persistent-queue)) the storage might grow significantly (e.g. 
when the exporter is unable to send the data due to a network problem) after which it is being emptied as the underlying issue is gone (e.g. network connectivity is back). This leaves a significant space that needs to be reclaimed (also, this space is reported in memory usage as mmap() is used underneath). The optimal conditions for this to happen online are after the storage is largely drained, which is being controlled by `rebound_size_below_mib`. To make sure this is not too sensitive, there's also `rebound_total_size_above_mib` which specifies the total claimed space size that must be met for online compaction to even be considered. Consider the following diagram for an example of meeting the rebound (online) compaction conditions. + +``` + ▲ + │ + │ XX............. +m │ XXXX............ +e ├───────────XXXXXXX..........──────────── rebound_total_size_above_mib +m │ XXXXXXXXX.......... +o │ XXXXXXXXXXX......... +r │ XXXXXXXXXXXXXXXXX.... +y ├─────XXXXXXXXXXXXXXXXXXXXX..──────────── rebound_size_below_mib + │ XXXXXXXXXXXXXXXXXXXXXXXXXX......... + │ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + └──────────────── time ─────────────────► + │ | | + issue draining compaction happens + starts begins and reclaims space + + X - actually used space + . 
- claimed but no longer used space +``` + + +## Example + ``` extensions: file_storage: diff --git a/extension/storage/filestorage/client.go b/extension/storage/filestorage/client.go index ff9a2f4d3ce0..3a356dcc450b 100644 --- a/extension/storage/filestorage/client.go +++ b/extension/storage/filestorage/client.go @@ -19,16 +19,28 @@ import ( "errors" "io/ioutil" "os" + "sync" "time" "go.etcd.io/bbolt" "go.opentelemetry.io/collector/extension/experimental/storage" + "go.uber.org/zap" ) var defaultBucket = []byte(`default`) +const ( + elapsedKey = "elapsed" + directoryKey = "directory" + tempDirectoryKey = "tempDirectory" +) + type fileStorageClient struct { - db *bbolt.DB + logger *zap.Logger + compactionMutex sync.RWMutex + db *bbolt.DB + compactionCfg *CompactionConfig + openTimeout time.Duration } func bboltOptions(timeout time.Duration) *bbolt.Options { @@ -40,7 +52,7 @@ func bboltOptions(timeout time.Duration) *bbolt.Options { } } -func newClient(filePath string, timeout time.Duration) (*fileStorageClient, error) { +func newClient(logger *zap.Logger, filePath string, timeout time.Duration, compactionCfg *CompactionConfig) (*fileStorageClient, error) { options := bboltOptions(timeout) db, err := bbolt.Open(filePath, 0600, options) if err != nil { @@ -55,7 +67,7 @@ func newClient(filePath string, timeout time.Duration) (*fileStorageClient, erro return nil, err } - return &fileStorageClient{db}, nil + return &fileStorageClient{logger: logger, db: db, compactionCfg: compactionCfg, openTimeout: timeout}, nil } // Get will retrieve data from storage that corresponds to the specified key @@ -80,7 +92,7 @@ func (c *fileStorageClient) Delete(ctx context.Context, key string) error { } // Batch executes the specified operations in order. 
Get operation results are updated in place -func (c *fileStorageClient) Batch(_ context.Context, ops ...storage.Operation) error { +func (c *fileStorageClient) Batch(ctx context.Context, ops ...storage.Operation) error { batch := func(tx *bbolt.Tx) error { bucket := tx.Bucket(defaultBucket) if bucket == nil { @@ -108,7 +120,20 @@ func (c *fileStorageClient) Batch(_ context.Context, ops ...storage.Operation) e return nil } - return c.db.Update(batch) + c.compactionMutex.RLock() + defer c.compactionMutex.RUnlock() + err := c.db.Update(batch) + if c.shouldCompactOnWrite() { + go func() { + onWriteErr := c.Compact(ctx, c.compactionCfg.Directory, c.openTimeout, c.compactionCfg.MaxTransactionSize) + if onWriteErr != nil { + c.logger.Error("compaction failure", + zap.String(directoryKey, c.compactionCfg.Directory), + zap.Error(onWriteErr)) + } + }() + } + return err } // Close will close the database @@ -117,40 +142,87 @@ func (c *fileStorageClient) Close(_ context.Context) error { } // Compact database. 
Use temporary file as helper as we cannot replace database in-place -func (c *fileStorageClient) Compact(ctx context.Context, compactionDirectory string, timeout time.Duration, maxTransactionSize int64) (*fileStorageClient, error) { +func (c *fileStorageClient) Compact(ctx context.Context, compactionDirectory string, timeout time.Duration, maxTransactionSize int64) error { + var err error + var file *os.File + var compactedDb *bbolt.DB + // create temporary file in compactionDirectory - file, err := ioutil.TempFile(compactionDirectory, "tempdb") + file, err = ioutil.TempFile(compactionDirectory, "tempdb") if err != nil { - return nil, err + return err } + defer func() { + _, statErr := file.Stat() + if statErr == nil { + // File still exists and needs to be removed + if removeErr := os.Remove(file.Name()); removeErr != nil { + c.logger.Error("removing temporary compaction file failed", zap.Error(removeErr)) + } + } + }() + // use temporary file as compaction target options := bboltOptions(timeout) + c.logger.Debug("starting compaction", + zap.String(directoryKey, c.db.Path()), + zap.String(tempDirectoryKey, file.Name())) + // cannot reuse newClient as db shouldn't contain any bucket - db, err := bbolt.Open(file.Name(), 0600, options) + compactedDb, err = bbolt.Open(file.Name(), 0600, options) if err != nil { - return nil, err + return err } - if err := bbolt.Compact(db, c.db, maxTransactionSize); err != nil { - return nil, err + c.compactionMutex.Lock() + defer c.compactionMutex.Unlock() + compactionStart := time.Now() + + if err = bbolt.Compact(compactedDb, c.db, maxTransactionSize); err != nil { + return err } dbPath := c.db.Path() - tempDBPath := db.Path() + compactedDbPath := compactedDb.Path() - db.Close() - c.Close(ctx) + c.db.Close() + compactedDb.Close() // replace current db file with compacted db file - if err := os.Remove(dbPath); err != nil { - return nil, err + if err = os.Remove(dbPath); err != nil { + return err } - if err := os.Rename(tempDBPath, 
dbPath); err != nil { - return nil, err + if err = os.Rename(compactedDbPath, dbPath); err != nil { + return err + } + + c.db, err = bbolt.Open(dbPath, 0600, options) + + c.logger.Info("finished compaction", + zap.String(directoryKey, dbPath), + zap.Duration(elapsedKey, time.Since(compactionStart))) + + return err +} + +// shouldCompactOnWrite checks whether the conditions for online compaction are met +func (c *fileStorageClient) shouldCompactOnWrite() bool { + if !c.compactionCfg.OnRebound { + return false } - return newClient(dbPath, timeout) + dbStats := c.db.Stats() + if dbStats.FreelistInuse > int(c.compactionCfg.ReboundSizeBelowMiB) || + dbStats.FreeAlloc+dbStats.FreelistInuse < int(c.compactionCfg.ReboundTotalSizeAboveMiB) { + return false + } + + c.logger.Debug("shouldCompactOnWrite returns true", + zap.Int("FreelistInuse", dbStats.FreelistInuse), + zap.Int("FreeAlloc", dbStats.FreeAlloc)) + + return true } diff --git a/extension/storage/filestorage/client_test.go b/extension/storage/filestorage/client_test.go index f967e2b4f79c..6e6f3bc9ec1d 100644 --- a/extension/storage/filestorage/client_test.go +++ b/extension/storage/filestorage/client_test.go @@ -26,13 +26,14 @@ import ( "github.com/stretchr/testify/require" "go.etcd.io/bbolt" "go.opentelemetry.io/collector/extension/experimental/storage" + "go.uber.org/zap" ) func TestClientOperations(t *testing.T) { tempDir := newTempDir(t) dbFile := filepath.Join(tempDir, "my_db") - client, err := newClient(dbFile, time.Second) + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.NoError(t, err) ctx := context.Background() @@ -67,7 +68,7 @@ func TestClientBatchOperations(t *testing.T) { tempDir := newTempDir(t) dbFile := filepath.Join(tempDir, "my_db") - client, err := newClient(dbFile, time.Second) + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.NoError(t, err) ctx := context.Background() @@ -185,7 +186,7 @@ func 
TestNewClientTransactionErrors(t *testing.T) { tempDir := newTempDir(t) dbFile := filepath.Join(tempDir, "my_db") - client, err := newClient(dbFile, timeout) + client, err := newClient(zap.NewNop(), dbFile, timeout, &CompactionConfig{}) require.NoError(t, err) // Create a problem @@ -204,7 +205,7 @@ func TestNewClientErrorsOnInvalidBucket(t *testing.T) { tempDir := newTempDir(t) dbFile := filepath.Join(tempDir, "my_db") - client, err := newClient(dbFile, time.Second) + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.Error(t, err) require.Nil(t, client) @@ -215,7 +216,7 @@ func BenchmarkClientGet(b *testing.B) { tempDir := newTempDir(b) dbFile := filepath.Join(tempDir, "my_db") - client, err := newClient(dbFile, time.Second) + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.NoError(b, err) ctx := context.Background() @@ -231,7 +232,7 @@ func BenchmarkClientGet100(b *testing.B) { tempDir := newTempDir(b) dbFile := filepath.Join(tempDir, "my_db") - client, err := newClient(dbFile, time.Second) + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.NoError(b, err) ctx := context.Background() @@ -251,7 +252,7 @@ func BenchmarkClientSet(b *testing.B) { tempDir := newTempDir(b) dbFile := filepath.Join(tempDir, "my_db") - client, err := newClient(dbFile, time.Second) + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.NoError(b, err) ctx := context.Background() @@ -268,7 +269,7 @@ func BenchmarkClientSet100(b *testing.B) { tempDir := newTempDir(b) dbFile := filepath.Join(tempDir, "my_db") - client, err := newClient(dbFile, time.Second) + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.NoError(b, err) ctx := context.Background() @@ -288,7 +289,7 @@ func BenchmarkClientDelete(b *testing.B) { tempDir := newTempDir(b) dbFile := filepath.Join(tempDir, "my_db") - client, err := 
newClient(dbFile, time.Second) + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.NoError(b, err) ctx := context.Background() @@ -311,7 +312,7 @@ func BenchmarkClientSetLargeDB(b *testing.B) { tempDir := newTempDir(b) dbFile := filepath.Join(tempDir, "my_db") - client, err := newClient(dbFile, time.Second) + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.NoError(b, err) ctx := context.Background() @@ -345,7 +346,7 @@ func BenchmarkClientInitLargeDB(b *testing.B) { tempDir := newTempDir(b) dbFile := filepath.Join(tempDir, "my_db") - client, err := newClient(dbFile, time.Second) + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.NoError(b, err) ctx := context.Background() @@ -361,7 +362,7 @@ func BenchmarkClientInitLargeDB(b *testing.B) { var tempClient *fileStorageClient b.ResetTimer() for n := 0; n < b.N; n++ { - tempClient, err = newClient(dbFile, time.Second) + tempClient, err = newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) require.NoError(b, err) b.StopTimer() err = tempClient.Close(ctx) @@ -370,6 +371,88 @@ func BenchmarkClientInitLargeDB(b *testing.B) { } } +func BenchmarkClientCompactLargeDBFile(b *testing.B) { + entrySizeInBytes := 1024 * 1024 + entryCount := 2000 + entry := make([]byte, entrySizeInBytes) + var testKey string + + tempDir := newTempDir(b) + dbFile := filepath.Join(tempDir, "my_db") + + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) + require.NoError(b, err) + + ctx := context.Background() + + for n := 0; n < entryCount; n++ { + testKey = fmt.Sprintf("testKey-%d", n) + client.Set(ctx, testKey, entry) + } + + // Leave one key in the db + for n := 0; n < entryCount-1; n++ { + testKey = fmt.Sprintf("testKey-%d", n) + client.Delete(ctx, testKey) + } + + client.Close(ctx) + + b.ResetTimer() + b.StopTimer() + for n := 0; n < b.N; n++ { + testDbFile := filepath.Join(tempDir, 
fmt.Sprintf("my_db%d", n)) + err = os.Link(dbFile, testDbFile) + require.NoError(b, err) + client, err = newClient(zap.NewNop(), testDbFile, time.Second, &CompactionConfig{}) + require.NoError(b, err) + b.StartTimer() + client.Compact(ctx, tempDir, time.Second, 65536) + b.StopTimer() + } +} + +func BenchmarkClientCompactDb(b *testing.B) { + entrySizeInBytes := 1024 * 128 + entryCount := 160 + entry := make([]byte, entrySizeInBytes) + var testKey string + + tempDir := newTempDir(b) + dbFile := filepath.Join(tempDir, "my_db") + + client, err := newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}) + require.NoError(b, err) + + ctx := context.Background() + + for n := 0; n < entryCount; n++ { + testKey = fmt.Sprintf("testKey-%d", n) + client.Set(ctx, testKey, entry) + } + + // Leave half the keys in the DB + for n := 0; n < entryCount/2; n++ { + testKey = fmt.Sprintf("testKey-%d", n) + client.Delete(ctx, testKey) + } + + client.Close(ctx) + + b.ResetTimer() + b.StopTimer() + for n := 0; n < b.N; n++ { + testDbFile := filepath.Join(tempDir, fmt.Sprintf("my_db%d", n)) + err = os.Link(dbFile, testDbFile) + require.NoError(b, err) + client, err = newClient(zap.NewNop(), testDbFile, time.Second, &CompactionConfig{}) + require.NoError(b, err) + b.StartTimer() + client.Compact(ctx, tempDir, time.Second, 65536) + b.StopTimer() + } +} + func newTempDir(tb testing.TB) string { tempDir, err := ioutil.TempDir("", "") require.NoError(tb, err) diff --git a/extension/storage/filestorage/config.go b/extension/storage/filestorage/config.go index 9a728558bc9a..5acefde40dbf 100644 --- a/extension/storage/filestorage/config.go +++ b/extension/storage/filestorage/config.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/collector/config" ) -// Config defines configuration for http forwarder extension. +// Config defines configuration for file storage extension. 
type Config struct { config.ExtensionSettings `mapstructure:",squash"` @@ -34,10 +34,25 @@ type Config struct { Compaction *CompactionConfig `mapstructure:"compaction,omitempty"` } +// CompactionConfig defines configuration for optional file storage compaction. type CompactionConfig struct { - OnStart bool `mapstructure:"on_start,omitempty"` - Directory string `mapstructure:"directory,omitempty"` - MaxTransactionSize int64 `mapstructure:"max_transaction_size,omitempty"` + // OnStart specifies that compaction is attempted each time on start + OnStart bool `mapstructure:"on_start,omitempty"` + // OnRebound specifies that compaction is attempted online, when rebound conditions are met. + // This typically happens when storage usage has increased, which caused increase in space allocation + // and afterwards it had most items removed. We want to run the compaction online only when there are + // not too many elements still being stored (which is an indication that "heavy usage" period is over) + // so compaction should be relatively fast and at the same time there is relatively large volume of space + // that might be reclaimed. 
+ OnRebound bool `mapstructure:"on_rebound,omitempty"` + // Directory specifies where the temporary files for compaction will be stored + Directory string `mapstructure:"directory,omitempty"` + // ReboundSizeBelowMiB specifies the maximum actually used size for online compaction to happen + ReboundSizeBelowMiB int64 `mapstructure:"rebound_size_below_mib"` + // ReboundTotalSizeAboveMiB specifies the minimum total allocated size (both used and empty) for online compaction + ReboundTotalSizeAboveMiB int64 `mapstructure:"rebound_total_size_above_mib"` + // MaxTransactionSize specifies the maximum number of items that might be present in single compaction iteration + MaxTransactionSize int64 `mapstructure:"max_transaction_size,omitempty"` } func (cfg *Config) Validate() error { diff --git a/extension/storage/filestorage/config_test.go b/extension/storage/filestorage/config_test.go index 368ec8144d90..d84a8d889128 100644 --- a/extension/storage/filestorage/config_test.go +++ b/extension/storage/filestorage/config_test.go @@ -56,9 +56,12 @@ func TestLoadConfig(t *testing.T) { ExtensionSettings: config.NewExtensionSettings(config.NewComponentIDWithName(typeStr, "all_settings")), Directory: ".", Compaction: &CompactionConfig{ - Directory: ".", - OnStart: true, - MaxTransactionSize: 2048, + Directory: ".", + OnStart: true, + OnRebound: true, + MaxTransactionSize: 2048, + ReboundTotalSizeAboveMiB: 128, + ReboundSizeBelowMiB: 16, }, Timeout: 2 * time.Second, }, diff --git a/extension/storage/filestorage/extension.go b/extension/storage/filestorage/extension.go index 4cc311191099..e5afc6447dc9 100644 --- a/extension/storage/filestorage/extension.go +++ b/extension/storage/filestorage/extension.go @@ -18,7 +18,6 @@ import ( "context" "fmt" "path/filepath" - "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" @@ -27,12 +26,8 @@ import ( ) type localFileStorage struct { - directory string - timeout time.Duration - logger *zap.Logger - 
compactionDirectory string - compactOnStart bool - maxCompactionSize int64 + cfg *Config + logger *zap.Logger } // Ensure this storage extension implements the appropriate interface @@ -40,11 +35,8 @@ var _ storage.Extension = (*localFileStorage)(nil) func newLocalFileStorage(logger *zap.Logger, config *Config) (component.Extension, error) { return &localFileStorage{ - directory: filepath.Clean(config.Directory), - compactionDirectory: filepath.Clean(config.Compaction.Directory), - compactOnStart: config.Compaction.OnStart, - timeout: config.Timeout, - logger: logger, + cfg: config, + logger: logger, }, nil } @@ -69,16 +61,22 @@ func (lfs *localFileStorage) GetClient(ctx context.Context, kind component.Kind, rawName = fmt.Sprintf("%s_%s_%s_%s", kindString(kind), ent.Type(), ent.Name(), name) } // TODO sanitize rawName - absoluteName := filepath.Join(lfs.directory, rawName) - client, err := newClient(absoluteName, lfs.timeout) + absoluteName := filepath.Join(lfs.cfg.Directory, rawName) + client, err := newClient(lfs.logger, absoluteName, lfs.cfg.Timeout, lfs.cfg.Compaction) + + if err != nil { + return nil, err + } // return if compaction is not required - if err != nil || !lfs.compactOnStart { - return client, err + if lfs.cfg.Compaction.OnStart { + compactionErr := client.Compact(ctx, lfs.cfg.Compaction.Directory, lfs.cfg.Timeout, lfs.cfg.Compaction.MaxTransactionSize) + if compactionErr != nil { + lfs.logger.Error("compaction on start failed", zap.Error(compactionErr)) + } } - // perform compaction and returns client - return client.Compact(ctx, lfs.compactionDirectory, lfs.timeout, lfs.maxCompactionSize) + return client, nil } func kindString(k component.Kind) string { diff --git a/extension/storage/filestorage/extension_test.go b/extension/storage/filestorage/extension_test.go index 1a7b24fe7201..226221d63fa7 100644 --- a/extension/storage/filestorage/extension_test.go +++ b/extension/storage/filestorage/extension_test.go @@ -290,7 +290,7 @@ func 
TestCompaction(t *testing.T) { // compact the db c, ok := client.(*fileStorageClient) require.True(t, ok) - client, err = c.Compact(ctx, tempDir, cfg.Timeout, 1) + err = c.Compact(ctx, tempDir, cfg.Timeout, 1) require.NoError(t, err) // check size after compaction @@ -308,7 +308,7 @@ func TestCompaction(t *testing.T) { // compact after data removal c, ok = client.(*fileStorageClient) require.True(t, ok) - _, err = c.Compact(ctx, tempDir, cfg.Timeout, 1) + err = c.Compact(ctx, tempDir, cfg.Timeout, 1) require.NoError(t, err) // check size @@ -354,7 +354,7 @@ func TestCompactionRemoveTemp(t *testing.T) { // perform compaction in the same directory c, ok := client.(*fileStorageClient) require.True(t, ok) - client, err = c.Compact(ctx, tempDir, cfg.Timeout, 1) + err = c.Compact(ctx, tempDir, cfg.Timeout, 1) require.NoError(t, err) // check if only db exists in tempDir @@ -369,7 +369,7 @@ func TestCompactionRemoveTemp(t *testing.T) { c, ok = client.(*fileStorageClient) require.True(t, ok) - _, err = c.Compact(ctx, emptyTempDir, cfg.Timeout, 1) + err = c.Compact(ctx, emptyTempDir, cfg.Timeout, 1) require.NoError(t, err) // check if emptyTempDir is empty after compaction diff --git a/extension/storage/filestorage/factory.go b/extension/storage/filestorage/factory.go index a523d5d2d564..a3f889c34114 100644 --- a/extension/storage/filestorage/factory.go +++ b/extension/storage/filestorage/factory.go @@ -40,9 +40,12 @@ func createDefaultConfig() config.Extension { Compaction: &CompactionConfig{ Directory: getDefaultDirectory(), OnStart: false, + OnRebound: false, // use default bbolt value // https://github.com/etcd-io/bbolt/blob/d5db64bdbfdee1cb410894605f42ffef898f395d/cmd/bbolt/main.go#L1955 - MaxTransactionSize: 65536, + MaxTransactionSize: 65536, + ReboundSizeBelowMiB: 10, + ReboundTotalSizeAboveMiB: 100, }, Timeout: time.Second, } diff --git a/extension/storage/filestorage/testdata/config.yaml b/extension/storage/filestorage/testdata/config.yaml index 
1bc59b790ccc..955b7fc02bf1 100644 --- a/extension/storage/filestorage/testdata/config.yaml +++ b/extension/storage/filestorage/testdata/config.yaml @@ -9,6 +9,9 @@ extensions: compaction: directory: . on_start: true + on_rebound: true + rebound_size_below_mib: 16 + rebound_total_size_above_mib: 128 max_transaction_size: 2048 timeout: 2s