@@ -174,22 +174,6 @@ type Interface interface {
 	PrepareShutdown(w http.ResponseWriter, r *http.Request)
 }

-type flushCtx struct {
-	lock            *sync.RWMutex
-	flushDone       chan struct{}
-	newCtxAvailable chan struct{}
-	segmentWriter   *wal.SegmentWriter
-	creationTime    time.Time
-}
-
-func (o *flushCtx) Key() string {
-	return fmt.Sprintf("%d", o.creationTime.UnixNano())
-}
-
-func (o *flushCtx) Priority() int64 {
-	return -o.creationTime.UnixNano()
-}
-
 // Ingester builds chunks for incoming log streams.
 type Ingester struct {
 	services.Service
@@ -217,10 +201,11 @@ type Ingester struct {

 	// One queue per flush thread. Fingerprint is used to
 	// pick a queue.
+	numOps          int64
 	flushQueues     []*util.PriorityQueue
 	flushQueuesDone sync.WaitGroup

-	flushCtx *flushCtx
+	wal *wal.Manager

 	limiter *Limiter

@@ -268,7 +253,11 @@ func New(cfg Config, clientConfig client.Config,
 	targetSizeStats.Set(int64(cfg.TargetChunkSize))
 	metrics := newIngesterMetrics(registerer, metricsNamespace)

-	segmentWriter, err := wal.NewWalSegmentWriter()
+	walManager, err := wal.NewManager(wal.Config{
+		MaxAge:         wal.DefaultMaxAge,
+		MaxSegments:    wal.DefaultMaxSegments,
+		MaxSegmentSize: wal.DefaultMaxSegmentSize,
+	})
 	if err != nil {
 		return nil, err
 	}
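
The constructor now builds a wal.Manager with the package defaults. As a rough illustration of the same call with explicit limits, here is a minimal sketch; the field names and the (*wal.Manager, error) return are taken from the diff, while the import path, the duration unit for MaxAge, and the byte unit for MaxSegmentSize are assumptions.

```go
package main

import (
	"time"

	// Import path assumed from the Loki v3 module layout; adjust to the actual wal package.
	"github.com/grafana/loki/v3/pkg/storage/wal"
)

// newWALManager sketches building the manager with explicit limits instead of
// wal.DefaultMaxAge / wal.DefaultMaxSegments / wal.DefaultMaxSegmentSize.
func newWALManager() (*wal.Manager, error) {
	return wal.NewManager(wal.Config{
		MaxAge:         30 * time.Second, // assumed to be a time.Duration: roll a segment after this age
		MaxSegments:    10,               // assumed: maximum number of buffered segments
		MaxSegmentSize: 8 * 1024 * 1024,  // assumed to be bytes: 8 MiB per segment
	})
}
```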
@@ -291,12 +280,7 @@ func New(cfg Config, clientConfig client.Config,
 		writeLogManager:       writefailures.NewManager(logger, registerer, writeFailuresCfg, configs, "ingester_rf1"),
 		customStreamsTracker:  customStreamsTracker,
 		readRing:              readRing,
-		flushCtx: &flushCtx{
-			lock:            &sync.RWMutex{},
-			flushDone:       make(chan struct{}),
-			newCtxAvailable: make(chan struct{}),
-			segmentWriter:   segmentWriter,
-		},
+		wal:                   walManager,
 	}

 	// TODO: change flush on shutdown
@@ -477,7 +461,6 @@ func (i *Ingester) running(ctx context.Context) error {
 func (i *Ingester) stopping(_ error) error {
 	i.stopIncomingRequests()
 	var errs util.MultiError
-	// errs.Add(i.wal.Stop())

 	//if i.flushOnShutdownSwitch.Get() {
 	//	i.lifecycler.SetFlushOnShutdown(true)
@@ -567,30 +550,18 @@ func (i *Ingester) loop() {
 }

 func (i *Ingester) doFlushTick() {
-	i.flushCtx.lock.Lock()
-
-	// i.logger.Log("msg", "starting periodic flush")
-	// Stop new chunks being written while we swap destinations - we'll never unlock as this flushctx can no longer be used.
-	currentFlushCtx := i.flushCtx
-
-	// APIs become unblocked after resetting flushCtx
-	segmentWriter, err := wal.NewWalSegmentWriter()
-	if err != nil {
-		// TODO: handle this properly
-		panic(err)
-	}
-	i.flushCtx = &flushCtx{
-		lock:            &sync.RWMutex{},
-		flushDone:       make(chan struct{}),
-		newCtxAvailable: make(chan struct{}),
-		segmentWriter:   segmentWriter,
-	}
-	close(currentFlushCtx.newCtxAvailable) // Broadcast to all waiters that they can now fetch a new flushCtx. Small chance of a race but if they re-fetch the old one, they'll just check again immediately.
-	// Flush the finished context in the background & then notify watching API requests
-	// TODO: use multiple flush queues if required
-	// Don't write empty segments if there is nothing to write.
-	if currentFlushCtx.segmentWriter.InputSize() > 0 {
-		i.flushQueues[0].Enqueue(currentFlushCtx)
+	for {
+		// Keep adding ops to the queue until there are no more.
+		it, _ := i.wal.NextPending()
+		if it == nil {
+			break
+		}
+		i.numOps++
+		flushQueueIndex := i.numOps % int64(i.cfg.ConcurrentFlushes)
+		i.flushQueues[flushQueueIndex].Enqueue(&flushOp{
+			num: i.numOps,
+			it:  it,
+		})
 	}
 }

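
doFlushTick now drains the WAL manager's pending items and spreads them across the flush queues by op counter. The flushOp it enqueues is defined elsewhere in the package and is not part of this diff; since util.PriorityQueue previously relied on the removed flushCtx's Key and Priority methods, flushOp presumably exposes the same interface. A purely hypothetical sketch, with field and type names assumed (including the pending-item type returned by NextPending):

```go
// Hypothetical sketch only - the real flushOp lives outside this diff.
type flushOp struct {
	num int64               // monotonically increasing op number from the ingester
	it  *wal.PendingSegment // assumed name for whatever wal.Manager.NextPending returns
}

// Key mirrors the removed flushCtx.Key, uniquely identifying the op in the queue.
func (o *flushOp) Key() string {
	return fmt.Sprintf("%d", o.num)
}

// Priority mirrors the removed flushCtx.Priority: lower op numbers (older work)
// are flushed first.
func (o *flushOp) Priority() int64 {
	return -o.num
}
```

Distributing ops with i.numOps % ConcurrentFlushes replaces the old single-queue enqueue (flushQueues[0]) and keeps every flush worker busy.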
@@ -796,27 +767,11 @@ func (i *Ingester) Push(ctx context.Context, req *logproto.PushRequest) (*logpro
 		return &logproto.PushResponse{}, err
 	}

-	// Fetch a flush context and try to acquire the RLock
-	// The only time the Write Lock is held is when this context is no longer usable and a new one is being created.
-	// In this case, we need to re-read i.flushCtx in order to fetch the new one as soon as it's available.
-	// The newCtxAvailable chan is closed as soon as the new one is available to avoid a busy loop.
-	currentFlushCtx := i.flushCtx
-	for !currentFlushCtx.lock.TryRLock() {
-		select {
-		case <-currentFlushCtx.newCtxAvailable:
-		case <-ctx.Done():
-			return &logproto.PushResponse{}, ctx.Err()
-		}
-		currentFlushCtx = i.flushCtx
-	}
-	err = instance.Push(ctx, req, currentFlushCtx)
-	currentFlushCtx.lock.RUnlock()
-	select {
-	case <-ctx.Done():
-		return &logproto.PushResponse{}, ctx.Err()
-	case <-currentFlushCtx.flushDone:
-		return &logproto.PushResponse{}, err
+	if err = instance.Push(ctx, i.wal, req); err != nil {
+		return nil, err
 	}
+
+	return &logproto.PushResponse{}, nil
 }

 // GetStreamRates returns a response containing all streams and their current rate
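
With the WAL manager on the write path, Push hands the request to the instance together with i.wal and returns as soon as the append is accepted; the old block-until-flushDone handshake is gone. A minimal caller-side sketch, assuming ing is a wired-up *Ingester and ctx a request context (the logproto types are Loki's standard push types):

```go
// Sketch: pushing one entry through the new code path. The call now returns
// once the data has been handed to the WAL manager, without waiting for the
// segment to be flushed to object storage.
req := &logproto.PushRequest{
	Streams: []logproto.Stream{{
		Labels: `{job="example"}`,
		Entries: []logproto.Entry{{
			Timestamp: time.Now(),
			Line:      "hello world",
		}},
	}},
}
if _, err := ing.Push(ctx, req); err != nil {
	// Errors now surface immediately from instance.Push rather than after a flush.
	return err
}
```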
@@ -851,7 +806,7 @@ func (i *Ingester) GetOrCreateInstance(instanceID string) (*instance, error) { /
 	inst, ok = i.instances[instanceID]
 	if !ok {
 		var err error
-		inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.metrics, i.streamRateCalculator, i.writeLogManager, i.customStreamsTracker)
+		inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.metrics, i.streamRateCalculator, i.writeLogManager, i.customStreamsTracker, i.logger)
 		if err != nil {
 			return nil, err
 		}