Handle distributed queries when shards != data nodes
Fixes #2272. The query engine previously contained an explicit panic to reject queries where the number of shards did not equal the number of data nodes in the cluster. That panic was a placeholder awaiting the distributed queries branch, but it was never removed once that branch landed. There may be a more efficient way to handle this, but this fix simply queries all of the shards and merges their outputs. Previously, the code assumed that only one shard would be hit; querying multiple shards produced duplicate values during the map phase, so the map output now has to be merged rather than appended to avoid the duplicates.
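For context, the merge this commit relies on is a two-pointer walk over two timestamp-ordered map outputs that collapses duplicate timestamps into a single entry. The sketch below only illustrates that behavior as exercised by the new test further down; the mergeOutputs name, the MapReduceJob receiver, and the Timestamp field are taken from that test, while the signature details and the body are assumptions, not the commit's actual implementation.

// mergeOutputs combines two timestamp-ordered slices of map output into a
// single ordered slice, dropping entries whose timestamp was already emitted.
// Illustrative sketch only: assumes rawQueryMapOutput exposes a Timestamp
// field, as the test below does.
func (m *MapReduceJob) mergeOutputs(first, second []*rawQueryMapOutput) []*rawQueryMapOutput {
	merged := make([]*rawQueryMapOutput, 0, len(first)+len(second))
	i, j := 0, 0
	for i < len(first) || j < len(second) {
		var next *rawQueryMapOutput
		switch {
		case j >= len(second):
			next = first[i]
			i++
		case i >= len(first):
			next = second[j]
			j++
		case first[i].Timestamp <= second[j].Timestamp:
			next = first[i]
			i++
		default:
			next = second[j]
			j++
		}
		// Keep only the first value seen for a given timestamp.
		if n := len(merged); n > 0 && merged[n-1].Timestamp == next.Timestamp {
			continue
		}
		merged = append(merged, next)
	}
	return merged
}

Merging shard outputs pairwise this way keeps the combined result sorted by timestamp and free of duplicates, which is what the test cases below check.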
Showing 4 changed files with 202 additions and 52 deletions.
@@ -0,0 +1,93 @@
package influxql

import (
	"testing"
	"time"
)

// TestMergeOutputs verifies that MapReduceJob.mergeOutputs combines two
// timestamp-ordered map outputs into a single ordered slice with duplicate
// timestamps removed.
func TestMergeOutputs(t *testing.T) {
	job := MapReduceJob{}

	test := []struct {
		name     string
		first    []*rawQueryMapOutput
		second   []*rawQueryMapOutput
		expected []*rawQueryMapOutput
	}{
		{
			name:     "empty slices",
			first:    []*rawQueryMapOutput{},
			second:   []*rawQueryMapOutput{},
			expected: []*rawQueryMapOutput{},
		},
		{
			name:     "first empty",
			first:    []*rawQueryMapOutput{},
			second:   []*rawQueryMapOutput{&rawQueryMapOutput{time.Unix(0, 0).UnixNano(), 0}},
			expected: []*rawQueryMapOutput{&rawQueryMapOutput{time.Unix(0, 0).UnixNano(), 0}},
		},
		{
			name:     "second empty",
			first:    []*rawQueryMapOutput{&rawQueryMapOutput{time.Unix(0, 0).UnixNano(), 0}},
			second:   []*rawQueryMapOutput{},
			expected: []*rawQueryMapOutput{&rawQueryMapOutput{time.Unix(0, 0).UnixNano(), 0}},
		},
		{
			name:   "first before",
			first:  []*rawQueryMapOutput{&rawQueryMapOutput{time.Unix(0, 0).UnixNano(), 0}},
			second: []*rawQueryMapOutput{&rawQueryMapOutput{time.Unix(1, 0).UnixNano(), 0}},
			expected: []*rawQueryMapOutput{
				&rawQueryMapOutput{time.Unix(0, 0).UnixNano(), 0},
				&rawQueryMapOutput{time.Unix(1, 0).UnixNano(), 0},
			},
		},
		{
			name:   "second before",
			first:  []*rawQueryMapOutput{&rawQueryMapOutput{time.Unix(1, 0).UnixNano(), 0}},
			second: []*rawQueryMapOutput{&rawQueryMapOutput{time.Unix(0, 0).UnixNano(), 0}},
			expected: []*rawQueryMapOutput{
				&rawQueryMapOutput{time.Unix(0, 0).UnixNano(), 0},
				&rawQueryMapOutput{time.Unix(1, 0).UnixNano(), 0},
			},
		},
		{
			name:   "dups removed",
			first:  []*rawQueryMapOutput{&rawQueryMapOutput{time.Unix(1, 0).UnixNano(), 0}},
			second: []*rawQueryMapOutput{&rawQueryMapOutput{time.Unix(1, 0).UnixNano(), 0}},
			expected: []*rawQueryMapOutput{
				&rawQueryMapOutput{time.Unix(1, 0).UnixNano(), 0},
			},
		},
		{
			name: "sorted dups removed",
			first: []*rawQueryMapOutput{
				&rawQueryMapOutput{time.Unix(0, 0).UnixNano(), 0},
				&rawQueryMapOutput{time.Unix(1, 0).UnixNano(), 0},
			},
			second: []*rawQueryMapOutput{
				&rawQueryMapOutput{time.Unix(1, 0).UnixNano(), 0},
				&rawQueryMapOutput{time.Unix(2, 0).UnixNano(), 0},
			},
			expected: []*rawQueryMapOutput{
				&rawQueryMapOutput{time.Unix(0, 0).UnixNano(), 0},
				&rawQueryMapOutput{time.Unix(1, 0).UnixNano(), 0},
				&rawQueryMapOutput{time.Unix(2, 0).UnixNano(), 0},
			},
		},
	}

	for _, c := range test {
		got := job.mergeOutputs(c.first, c.second)

		if len(got) != len(c.expected) {
			t.Errorf("test %s: result length mismatch: got %v, exp %v", c.name, len(got), len(c.expected))
			// The comparison below indexes got, so skip it when the lengths differ.
			continue
		}

		for j := 0; j < len(c.expected); j++ {
			if exp := c.expected[j]; exp.Timestamp != got[j].Timestamp {
				t.Errorf("test %s: timestamp mismatch: got %v, exp %v", c.name, got[j].Timestamp, exp.Timestamp)
			}
		}
	}
}