Merge branch 'jsimnz/feat/benchmark-suite' of github.com:sourcenetwork/defradb into jsimnz/feat/benchmark-suite
jsimnz committed Feb 1, 2022
2 parents bfe35dd + ee3d7fc commit 97febc0
Showing 4 changed files with 8 additions and 9 deletions.
2 changes: 1 addition & 1 deletion bench/README.md
@@ -1,7 +1,7 @@
# DefraDB Benchmark Suite
This folder contains the DefraDB Benchmark Suite, its related code, sub packages, utilities, and data generators.

-The goal of this suite is to provide an insight to DefraDBs performance, and to provide a quantitative approach to performance analysis and comparison. As such, the benchmark results should be used soley as a relative basis, and not concrete absolute values.
+The goal of this suite is to provide an insight to DefraDB's performance, and to provide a quantitative approach to performance analysis and comparison. As such, the benchmark results should be used solely as a relative basis, and not concrete absolute values.

> Database benchmarking is a notorious complex issue to provide fair evaluations, that are void of contrived examples aimed to put the database "best foot forward".
9 changes: 4 additions & 5 deletions bench/bench_util.go
@@ -33,7 +33,7 @@ var (

func init() {
// create a consistent seed value for the random package
-// so we dont have random fluctuations between runs
+// so we don't have random fluctuations between runs
// (specifically thinking about the fixture generation stuff)
seed := hashToInt64("https://xkcd.com/221/")
rand.Seed(seed)
@@ -44,8 +44,7 @@ func init() {
}
}

-// hashToInt64 uses the FNV-1 hash to int
-// algorithm
+// hashToInt64 uses the FNV-1 hash to int algorithm
func hashToInt64(s string) int64 {
h := fnv.New64a()
h.Write([]byte(s))
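The hunk above shows only part of the seeding helper. As a minimal, self-contained sketch of the same idea (a fixed string hashed with 64-bit FNV-1a to seed math/rand so fixture generation repeats identically between runs), something like the following would work; the main wrapper and the printed output are illustrative, not repository code:

```go
package main

import (
	"fmt"
	"hash/fnv"
	"math/rand"
)

// hashToInt64 maps a string to an int64 via the 64-bit FNV-1a hash,
// mirroring the fnv.New64a call shown in the excerpt.
func hashToInt64(s string) int64 {
	h := fnv.New64a()
	h.Write([]byte(s)) // Write on a hash.Hash never returns an error
	return int64(h.Sum64())
}

func main() {
	// Seeding with a constant string gives a stable pseudo-random sequence.
	// (rand.Seed is the pre-Go 1.20 global-seed style used in the excerpt.)
	rand.Seed(hashToInt64("https://xkcd.com/221/"))
	fmt.Println(rand.Intn(100), rand.Intn(100)) // same output on every run
}
```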
@@ -104,7 +103,7 @@ func SetupDBAndCollections(b *testing.B, ctx context.Context, fixture fixtures.G
}

// Loads the given test database using the provided fixture context.
-// It loads docCount number of documents asyncronously in batches of *upto*
+// It loads docCount number of documents asynchronously in batches of *upto*
// writeBatchGroup.
func BackfillBenchmarkDB(b *testing.B, ctx context.Context, cols []client.Collection, fixture fixtures.Generator, docCount, opCount int, doSync bool) ([][]key.DocKey, error) {
numTypes := len(fixture.Types())
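The doc comment above describes backfilling asynchronously in batches of up to writeBatchGroup documents. A hedged sketch of that pattern, using a buffered channel as a semaphore and a WaitGroup to wait for the whole backfill, is below; saveDoc and the writeBatchGroup value are placeholders rather than DefraDB APIs:

```go
package main

import (
	"fmt"
	"sync"
)

const writeBatchGroup = 100 // max concurrent writes (illustrative value)

// saveDoc stands in for a collection write; it is not a DefraDB call.
func saveDoc(i int) error { return nil }

func backfill(docCount int) {
	var wg sync.WaitGroup
	sem := make(chan struct{}, writeBatchGroup) // caps in-flight writes

	for i := 0; i < docCount; i++ {
		wg.Add(1)
		sem <- struct{}{} // blocks once writeBatchGroup writes are in flight
		go func(i int) {
			defer wg.Done()
			defer func() { <-sem }()
			if err := saveDoc(i); err != nil {
				fmt.Println("write failed:", err)
			}
		}(i)
	}
	wg.Wait()
}

func main() {
	backfill(1000)
}
```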
@@ -148,7 +147,7 @@ func BackfillBenchmarkDB(b *testing.B, ctx context.Context, cols []client.Collec
return
}

-// loop forever untill commited.
+// loop forever until committed.
// This was necessary when debugging and was left
// in place. The error check could prob use a wrap system
// but its fine :).
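The comment above refers to a loop that retries the commit until it succeeds. A rough sketch of that retry shape is below; the fakeTxn type and its Commit behaviour are assumptions made for illustration, not DefraDB's transaction API:

```go
package main

import (
	"errors"
	"fmt"
)

type fakeTxn struct{ attempts int }

// Commit fails twice and then succeeds, to exercise the retry loop.
func (t *fakeTxn) Commit() error {
	t.attempts++
	if t.attempts < 3 {
		return errors.New("transient conflict")
	}
	return nil
}

func main() {
	txn := &fakeTxn{}
	for {
		if err := txn.Commit(); err != nil {
			fmt.Println("commit failed, retrying:", err)
			continue
		}
		break
	}
	fmt.Println("committed after", txn.attempts, "attempts")
}
```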
2 changes: 1 addition & 1 deletion db/base/descriptions.go
@@ -45,7 +45,7 @@ func (col CollectionDescription) GetField(name string) (FieldDescription, bool)
}

// IndexDescription describes an Index on a Collection
-// and its assocatied metadata.
+// and its associated metadata.
type IndexDescription struct {
Name string
ID uint32
4 changes: 2 additions & 2 deletions db/collection.go
@@ -418,10 +418,10 @@ func (c *Collection) save(ctx context.Context, txn *Txn, doc *document.Document)
}

// NOTE: We delay the final Clean() call till we know
-// the commit on the transaction is successfull. If we didn't
+// the commit on the transaction is successful. If we didn't
// wait, and just did it here, then *if* the commit fails down
// the line, then we have no way to roll back the state
-// side-effect on the documnet func called here.
+// side-effect on the document func called here.
txn.OnSuccess(func() {
doc.Clean()
})
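The txn.OnSuccess call above defers doc.Clean() until the commit is known to have succeeded, so a failed commit leaves the in-memory document state untouched. A minimal sketch of that callback pattern is below; the Txn type here is illustrative, not DefraDB's implementation:

```go
package main

import "fmt"

type Txn struct {
	successFns []func()
}

// OnSuccess registers a callback to run only after a successful commit.
func (t *Txn) OnSuccess(fn func()) { t.successFns = append(t.successFns, fn) }

// Commit performs the commit and, on success, fires the registered callbacks.
func (t *Txn) Commit() error {
	// ... write the transaction; on error, return without firing callbacks ...
	for _, fn := range t.successFns {
		fn()
	}
	return nil
}

func main() {
	txn := &Txn{}
	txn.OnSuccess(func() { fmt.Println("doc.Clean() would run here") })
	_ = txn.Commit()
}
```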
