Bump github.com/Shopify/sarama from v1.33.0 to v1.38.1
Signed-off-by: axfor <[email protected]>
axfor committed Mar 12, 2023
1 parent a172bcc commit b7ab7fd
Showing 8 changed files with 535 additions and 549 deletions.
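This bump is more than a version change: github.com/bsm/sarama-cluster is deprecated, so the ingester's consumer moves to the consumer-group API built into sarama itself. Instead of draining a Partitions() channel and spawning a handler per sc.PartitionConsumer, Consumer now implements sarama's ConsumerGroupHandler and passes itself to ConsumerGroup.Consume. For reference, the handler interface as defined by the sarama package:

// Defined in github.com/Shopify/sarama; the Setup, Cleanup, and ConsumeClaim
// methods added in this diff are Consumer's implementations of it.
type ConsumerGroupHandler interface {
	Setup(ConsumerGroupSession) error
	Cleanup(ConsumerGroupSession) error
	ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
}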
103 changes: 66 additions & 37 deletions cmd/ingester/app/consumer/consumer.go
@@ -15,11 +15,12 @@
package consumer

import (
"context"
"strings"
"sync"
"time"

"github.com/Shopify/sarama"
sc "github.com/bsm/sarama-cluster"
"go.uber.org/zap"

"github.com/jaegertracing/jaeger/cmd/ingester/app/processor"
@@ -46,16 +47,14 @@ type Consumer struct {

deadlockDetector deadlockDetector

partitionIDToState map[int32]*consumerState
partitionMapLock sync.Mutex
partitionsHeld int64
partitionsHeldGauge metrics.Gauge

doneWg sync.WaitGroup
}

type consumerState struct {
partitionConsumer sc.PartitionConsumer
topic string
cancel context.CancelFunc
}

// New is a constructor for a Consumer
@@ -67,25 +66,30 @@ func New(params Params) (*Consumer, error) {
internalConsumer: params.InternalConsumer,
processorFactory: params.ProcessorFactory,
deadlockDetector: deadlockDetector,
partitionIDToState: make(map[int32]*consumerState),
partitionsHeldGauge: partitionsHeldGauge(params.MetricsFactory),
topic: params.ProcessorFactory.topic,
}, nil
}
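How params.InternalConsumer is built changes in other files of this commit that are not shown on this page. As a hedged sketch only, with the broker address, group ID, and Kafka version as placeholder assumptions, a consumer group for the new API is created roughly like this:

// Illustrative only, not this commit's builder code; all values are placeholders.
func newGroup() (sarama.ConsumerGroup, error) {
	config := sarama.NewConfig()
	config.Version = sarama.V2_3_0_0      // assumption: pick a version your brokers support
	config.Consumer.Return.Errors = true  // feeds the Errors() channel read by handleErrors
	config.Consumer.Offsets.Initial = sarama.OffsetOldest
	return sarama.NewConsumerGroup([]string{"localhost:9092"}, "jaeger-ingester", config)
}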

// Start begins consuming messages in a goroutine
func (c *Consumer) Start() {
c.deadlockDetector.start()
ctx, cancel := context.WithCancel(context.Background())
c.cancel = cancel
c.doneWg.Add(1)
go func() {
c.logger.Info("Starting main loop")
for pc := range c.internalConsumer.Partitions() {
c.partitionMapLock.Lock()
c.partitionIDToState[pc.Partition()] = &consumerState{partitionConsumer: pc}
c.partitionMapLock.Unlock()
c.partitionMetrics(pc.Partition()).startCounter.Inc(1)

c.doneWg.Add(2)
go c.handleMessages(pc)
go c.handleErrors(pc.Partition(), pc.Errors())
defer c.doneWg.Done()
for {
select {
case <-ctx.Done():
c.logger.Error("ctx canceld")
return
default:
c.logger.Info("Topic", zap.Strings("topic", strings.Split(c.topic, ",")))
if err := c.internalConsumer.Consume(ctx, strings.Split(c.topic, ","), c); err != nil {
c.logger.Error("Error from consumer", zap.Error(err))
}
}
}
}()
}
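Note the shape of this loop: ConsumerGroup.Consume blocks for the life of a single group session and returns whenever the group rebalances, so it must be called again to rejoin the group; the select on ctx.Done() is what finally breaks out once Close cancels the context.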
@@ -98,72 +102,97 @@ func (c *Consumer) Close() error {

c.logger.Debug("Closing deadlock detector")
c.deadlockDetector.close()
if c.cancel != nil {
c.cancel()
}

c.logger.Debug("Waiting for messages and errors to be handled")
c.doneWg.Wait()

return err
}

// handleMessages handles incoming Kafka messages on a channel
func (c *Consumer) handleMessages(pc sc.PartitionConsumer) {
c.logger.Info("Starting message handler", zap.Int32("partition", pc.Partition()))
// Setup is run at the beginning of a new session, before ConsumeClaim
func (c *Consumer) Setup(sarama.ConsumerGroupSession) error {
// Mark the consumer as ready
return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
func (c *Consumer) Cleanup(sarama.ConsumerGroupSession) error {
return nil
}

// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
func (c *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
c.partitionMetrics(claim.Partition()).startCounter.Inc(1)

c.doneWg.Add(2)
go c.handleErrors(claim.Partition(), c.internalConsumer.Errors())
c.handleMessages(session, claim)
return nil
}
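Two behavioral shifts are visible here. First, sarama invokes ConsumeClaim on its own goroutine, once per claimed partition in a session, so handleMessages now runs synchronously inside that callback instead of behind a dedicated go statement. Second, sarama-cluster offered a per-partition Errors() channel, whereas sarama's ConsumerGroup exposes a single group-wide Errors() channel (populated only when Consumer.Return.Errors is enabled), so each ConsumeClaim invocation passes that same shared channel to handleErrors. One caveat: claim.Messages() is closed at session end, and a receive from a closed channel yields a nil message, so the msg, ok := ... check seen in the removed code is the safer pattern.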

func (c *Consumer) handleMessages(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) {
c.logger.Info("Starting message handler", zap.Int32("partition", claim.Partition()))
c.partitionMapLock.Lock()
c.partitionsHeld++
c.partitionsHeldGauge.Update(c.partitionsHeld)
c.partitionMapLock.Unlock()
defer func() {
c.closePartition(pc)
c.closePartition(claim)
c.partitionMapLock.Lock()
c.partitionsHeld--
c.partitionsHeldGauge.Update(c.partitionsHeld)
c.partitionMapLock.Unlock()
c.doneWg.Done()
}()

msgMetrics := c.newMsgMetrics(pc.Partition())
msgMetrics := c.newMsgMetrics(claim.Partition())

var msgProcessor processor.SpanProcessor

deadlockDetector := c.deadlockDetector.startMonitoringForPartition(pc.Partition())
deadlockDetector := c.deadlockDetector.startMonitoringForPartition(claim.Partition())
defer deadlockDetector.close()

for {
select {
case msg, ok := <-pc.Messages():
if !ok {
c.logger.Info("Message channel closed. ", zap.Int32("partition", pc.Partition()))
return
}
case msg := <-claim.Messages():

c.logger.Debug("Got msg", zap.Any("msg", msg))
msgMetrics.counter.Inc(1)
msgMetrics.offsetGauge.Update(msg.Offset)
msgMetrics.lagGauge.Update(pc.HighWaterMarkOffset() - msg.Offset - 1)
msgMetrics.lagGauge.Update(claim.HighWaterMarkOffset() - msg.Offset - 1)
deadlockDetector.incrementMsgCount()

if msgProcessor == nil {
msgProcessor = c.processorFactory.new(pc.Partition(), msg.Offset-1)
msgProcessor = c.processorFactory.new(session, claim, msg.Offset-1)
defer msgProcessor.Close()
}

msgProcessor.Process(saramaMessageWrapper{msg})

// Should return when `session.Context()` is done.
// If not, sarama will raise `ErrRebalanceInProgress` or `read tcp <ip>:<port>: i/o timeout` when the Kafka cluster rebalances. See:
// https://github.com/Shopify/sarama/issues/1192
case <-session.Context().Done():
c.logger.Info("Session done", zap.Int32("partition", claim.Partition()))
return
case <-deadlockDetector.closePartitionChannel():
c.logger.Info("Closing partition due to inactivity", zap.Int32("partition", pc.Partition()))
c.logger.Info("Closing partition due to inactivity", zap.Int32("partition", claim.Partition()))
return
}
}
}
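Offset progress is now recorded through the session rather than through the sarama-cluster consumer's offset-marking calls; the committer wiring lives in the processor-factory changes of this commit, not on this page. A generic, hedged sketch of the sarama-side pattern, with process as a hypothetical stand-in for the span processor:

// Illustrative only, not the commit's committer implementation.
func consumeAndMark(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim, process func(*sarama.ConsumerMessage)) {
	for msg := range claim.Messages() {
		process(msg) // hypothetical processing step
		// MarkMessage records msg.Offset+1 as ready to commit; with offset
		// auto-commit enabled (sarama's default) it is flushed periodically.
		session.MarkMessage(msg, "")
	}
}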

func (c *Consumer) closePartition(partitionConsumer sc.PartitionConsumer) {
c.logger.Info("Closing partition consumer", zap.Int32("partition", partitionConsumer.Partition()))
partitionConsumer.Close() // blocks until messages channel is drained
c.partitionMetrics(partitionConsumer.Partition()).closeCounter.Inc(1)
c.logger.Info("Closed partition consumer", zap.Int32("partition", partitionConsumer.Partition()))
func (c *Consumer) closePartition(claim sarama.ConsumerGroupClaim) {
c.logger.Info("Closing partition consumer", zap.Int32("partition", claim.Partition()))
// claim.Close() // blocks until messages channel is drained
c.partitionMetrics(claim.Partition()).closeCounter.Inc(1)
c.logger.Info("Closed partition consumer", zap.Int32("partition", claim.Partition()))
}
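The commented-out Close is deliberate: sarama.ConsumerGroupClaim has no Close method (claims are opened and torn down by the session itself during rebalances), so only the metrics and logging bookkeeping of the old closePartition survives.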

// handleErrors handles incoming Kafka consumer errors on a channel
func (c *Consumer) handleErrors(partition int32, errChan <-chan *sarama.ConsumerError) {
func (c *Consumer) handleErrors(partition int32, errChan <-chan error) {
c.logger.Info("Starting error handler", zap.Int32("partition", partition))
defer c.doneWg.Done()
