@@ -6,6 +6,7 @@
     "github.com/go-kit/log"
     "github.com/go-kit/log/level"
     "github.com/opentracing/opentracing-go"
+    "github.com/pkg/errors"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promauto"
     "github.com/prometheus/common/model"
@@ -99,9 +100,7 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
     preFilterChunks := len(chunkRefs)
     preFilterSeries := len(grouped)
 
-    result := make([]*logproto.ChunkRef, 0, len(chunkRefs))
-    seriesSeen := make(map[uint64]struct{}, len(grouped))
-
+    responses := make([][]*logproto.GroupedChunkRefs, 0, 2)
     // We can perform requests sequentially, because most of the time the request
     // only covers a single day, and if not, it's at most two days.
     for _, s := range partitionSeriesByDay(from, through, grouped) {
@@ -110,53 +109,52 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
         if err != nil {
             return nil, err
         }
-        var chunks int
-        for i := range s.series {
-            chunks += len(s.series[i].Refs)
-        }
-        sp.LogKV(
-            "day", s.day.Time.Time(),
-            "from", s.interval.Start.Time(),
-            "through", s.interval.End.Time(),
-            "series", len(s.series),
-            "chunks", chunks,
-            "blocks", len(blocks),
-            "skipped", len(skipped),
-        )
 
         refs, err := bq.c.FilterChunks(ctx, tenant, s.interval, blocks, queryPlan)
         if err != nil {
             return nil, err
         }
 
         // add chunk refs from series that were not mapped to any blocks
-        refs = append(refs, skipped...)
+        responses = append(responses, refs, skipped)
         bq.metrics.seriesSkipped.Add(float64(len(skipped)))
+    }
+
+    deduped, err := mergeSeries(responses, nil)
+    if err != nil {
+        return nil, errors.Wrap(err, "failed to dedupe results")
+    }
 
-        for i := range refs {
-            seriesSeen[refs[i].Fingerprint] = struct{}{}
-            for _, ref := range refs[i].Refs {
-                result = append(result, &logproto.ChunkRef{
-                    Fingerprint: refs[i].Fingerprint,
-                    UserID:      tenant,
-                    From:        ref.From,
-                    Through:     ref.Through,
-                    Checksum:    ref.Checksum,
-                })
-            }
+    result := make([]*logproto.ChunkRef, 0, len(chunkRefs))
+    for i := range deduped {
+        for _, ref := range deduped[i].Refs {
+            result = append(result, &logproto.ChunkRef{
+                Fingerprint: deduped[i].Fingerprint,
+                UserID:      tenant,
+                From:        ref.From,
+                Through:     ref.Through,
+                Checksum:    ref.Checksum,
+            })
         }
     }
 
+    postFilterChunks := len(result)
+    postFilterSeries := len(deduped)
+
     level.Debug(bq.logger).Log(
+        "operation", "bloomquerier.FilterChunkRefs",
+        "tenant", tenant,
+        "from", from.Time(),
+        "through", through.Time(),
+        "responses", len(responses),
         "preFilterChunks", preFilterChunks,
-        "postFilterChunks", len(result),
+        "postFilterChunks", postFilterChunks,
+        "filteredChunks", preFilterChunks - postFilterChunks,
         "preFilterSeries", preFilterSeries,
-        "postFilterSeries", len(seriesSeen),
+        "postFilterSeries", postFilterSeries,
+        "filteredSeries", preFilterSeries - postFilterSeries,
     )
 
-    postFilterChunks := len(result)
-    postFilterSeries := len(seriesSeen)
-
     bq.metrics.chunksTotal.Add(float64(preFilterChunks))
     bq.metrics.chunksFiltered.Add(float64(preFilterChunks - postFilterChunks))
    bq.metrics.seriesTotal.Add(float64(preFilterSeries))
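
Context for reviewers: the new code path hands deduplication to `mergeSeries(responses, nil)`, whose implementation is not shown in this hunk. Deduping matters here because the same series can appear in more than one response: `refs` and `skipped` are appended separately for each day, and a query spanning two days can return the same series on both days. Below is a minimal, self-contained sketch of the dedupe-then-flatten idea; the simplified type shapes (`GroupedChunkRefs`, `ShortRef`), the checksum-based ref dedupe, and dropping the second (`nil`) parameter are assumptions for illustration, and the actual helper in this PR may differ.

```go
package main

import (
	"fmt"
	"sort"
)

// Hypothetical, simplified stand-ins for the logproto types the diff uses;
// the real field shapes are defined elsewhere in the repo.
type ShortRef struct {
	From, Through int64
	Checksum      uint32
}

type GroupedChunkRefs struct {
	Fingerprint uint64
	Refs        []*ShortRef
}

// mergeSeriesSketch merges multiple responses of grouped chunk refs into one
// slice, deduping series by fingerprint and chunk refs by checksum, and
// returns the result ordered by fingerprint.
func mergeSeriesSketch(responses [][]*GroupedChunkRefs) []*GroupedChunkRefs {
	byFP := make(map[uint64]*GroupedChunkRefs)
	for _, resp := range responses {
		for _, series := range resp {
			existing, ok := byFP[series.Fingerprint]
			if !ok {
				byFP[series.Fingerprint] = series
				continue
			}
			// Same series seen in an earlier response: merge its refs,
			// skipping refs that are already present.
			seen := make(map[uint32]struct{}, len(existing.Refs))
			for _, ref := range existing.Refs {
				seen[ref.Checksum] = struct{}{}
			}
			for _, ref := range series.Refs {
				if _, dup := seen[ref.Checksum]; !dup {
					existing.Refs = append(existing.Refs, ref)
				}
			}
		}
	}
	merged := make([]*GroupedChunkRefs, 0, len(byFP))
	for _, s := range byFP {
		merged = append(merged, s)
	}
	sort.Slice(merged, func(i, j int) bool { return merged[i].Fingerprint < merged[j].Fingerprint })
	return merged
}

func main() {
	a := []*GroupedChunkRefs{{Fingerprint: 1, Refs: []*ShortRef{{Checksum: 10}}}}
	b := []*GroupedChunkRefs{{Fingerprint: 1, Refs: []*ShortRef{{Checksum: 10}, {Checksum: 11}}}}
	for _, s := range mergeSeriesSketch([][]*GroupedChunkRefs{a, b}) {
		fmt.Println(s.Fingerprint, len(s.Refs)) // prints: 1 2
	}
}
```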