Skip to content

Commit 3cc3db9

Browse files
chore: Remove ifc from interface names (#15072)
1 parent b406015 commit 3cc3db9

File tree

7 files changed

+28
-28
lines changed

7 files changed

+28
-28
lines changed

pkg/blockbuilder/controller.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -65,13 +65,13 @@ type PartitionController interface {
6565
// containing log data and "committed" is the consumer group
6666
type PartitionJobController struct {
6767
stepLen int64
68-
part partition.ReaderIfc
68+
part partition.Reader
6969
backoff backoff.Config
7070
decoder *kafka.Decoder
7171
}
7272

7373
func NewPartitionJobController(
74-
controller partition.ReaderIfc,
74+
controller partition.Reader,
7575
backoff backoff.Config,
7676
) (*PartitionJobController, error) {
7777
decoder, err := kafka.NewDecoder()

pkg/kafka/partition/committer.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -26,15 +26,15 @@ type partitionCommitter struct {
2626
lastCommittedOffset prometheus.Gauge
2727

2828
logger log.Logger
29-
reader ReaderIfc
29+
reader Reader
3030
commitFreq time.Duration
3131

3232
toCommit *atomic.Int64
3333
wg sync.WaitGroup
3434
cancel context.CancelFunc
3535
}
3636

37-
func newCommitter(reader ReaderIfc, commitFreq time.Duration, logger log.Logger, reg prometheus.Registerer) *partitionCommitter {
37+
func newCommitter(reader Reader, commitFreq time.Duration, logger log.Logger, reg prometheus.Registerer) *partitionCommitter {
3838
c := &partitionCommitter{
3939
logger: logger,
4040
reader: reader,

pkg/kafka/partition/committer_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ func TestPartitionCommitter(t *testing.T) {
3636
reg := prometheus.NewRegistry()
3737
partitionID := int32(1)
3838
consumerGroup := "test-consumer-group"
39-
reader := newReader(
39+
reader := newStdReader(
4040
client,
4141
kafkaCfg.Topic,
4242
partitionID,

pkg/kafka/partition/reader.go

+18-18
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ type Record struct {
3737
Offset int64
3838
}
3939

40-
type ReaderIfc interface {
40+
type Reader interface {
4141
Topic() string
4242
Partition() int32
4343
ConsumerGroup() string
@@ -91,8 +91,8 @@ func newReaderMetrics(r prometheus.Registerer) *readerMetrics {
9191
}
9292
}
9393

94-
// Reader provides low-level access to Kafka partition reading operations
95-
type Reader struct {
94+
// StdReader provides low-level access to Kafka partition reading operations
95+
type StdReader struct {
9696
client *kgo.Client
9797
topic string
9898
partitionID int32
@@ -101,13 +101,13 @@ type Reader struct {
101101
logger log.Logger
102102
}
103103

104-
func NewReader(
104+
func NewStdReader(
105105
cfg kafka.Config,
106106
partitionID int32,
107107
instanceID string,
108108
logger log.Logger,
109109
reg prometheus.Registerer,
110-
) (*Reader, error) {
110+
) (*StdReader, error) {
111111
// Create a new Kafka client for this reader
112112
clientMetrics := client.NewReaderClientMetrics("partition-reader", reg)
113113
c, err := client.NewReaderClient(
@@ -120,7 +120,7 @@ func NewReader(
120120
}
121121

122122
// Create the reader
123-
return newReader(
123+
return newStdReader(
124124
c,
125125
cfg.Topic,
126126
partitionID,
@@ -130,16 +130,16 @@ func NewReader(
130130
), nil
131131
}
132132

133-
// NewReader creates a new Reader instance
134-
func newReader(
133+
// newStdReader creates a new StdReader instance
134+
func newStdReader(
135135
client *kgo.Client,
136136
topic string,
137137
partitionID int32,
138138
consumerGroup string,
139139
logger log.Logger,
140140
reg prometheus.Registerer,
141-
) *Reader {
142-
return &Reader{
141+
) *StdReader {
142+
return &StdReader{
143143
client: client,
144144
topic: topic,
145145
partitionID: partitionID,
@@ -150,22 +150,22 @@ func newReader(
150150
}
151151

152152
// Topic returns the topic being read
153-
func (r *Reader) Topic() string {
153+
func (r *StdReader) Topic() string {
154154
return r.topic
155155
}
156156

157157
// Partition returns the partition being read
158-
func (r *Reader) Partition() int32 {
158+
func (r *StdReader) Partition() int32 {
159159
return r.partitionID
160160
}
161161

162162
// ConsumerGroup returns the consumer group
163-
func (r *Reader) ConsumerGroup() string {
163+
func (r *StdReader) ConsumerGroup() string {
164164
return r.consumerGroup
165165
}
166166

167167
// FetchLastCommittedOffset retrieves the last committed offset for this partition
168-
func (r *Reader) FetchLastCommittedOffset(ctx context.Context) (int64, error) {
168+
func (r *StdReader) FetchLastCommittedOffset(ctx context.Context) (int64, error) {
169169
req := kmsg.NewPtrOffsetFetchRequest()
170170
req.Topics = []kmsg.OffsetFetchRequestTopic{{
171171
Topic: r.topic,
@@ -210,7 +210,7 @@ func (r *Reader) FetchLastCommittedOffset(ctx context.Context) (int64, error) {
210210
}
211211

212212
// FetchPartitionOffset retrieves the offset for a specific position
213-
func (r *Reader) FetchPartitionOffset(ctx context.Context, position SpecialOffset) (int64, error) {
213+
func (r *StdReader) FetchPartitionOffset(ctx context.Context, position SpecialOffset) (int64, error) {
214214
partitionReq := kmsg.NewListOffsetsRequestTopicPartition()
215215
partitionReq.Partition = r.partitionID
216216
partitionReq.Timestamp = int64(position)
@@ -257,7 +257,7 @@ func (r *Reader) FetchPartitionOffset(ctx context.Context, position SpecialOffse
257257
}
258258

259259
// Poll retrieves the next batch of records from Kafka
260-
func (r *Reader) Poll(ctx context.Context) ([]Record, error) {
260+
func (r *StdReader) Poll(ctx context.Context) ([]Record, error) {
261261
start := time.Now()
262262
fetches := r.client.PollFetches(ctx)
263263
r.metrics.fetchWaitDuration.Observe(time.Since(start).Seconds())
@@ -303,14 +303,14 @@ func (r *Reader) Poll(ctx context.Context) ([]Record, error) {
303303
return records, nil
304304
}
305305

306-
func (r *Reader) SetOffsetForConsumption(offset int64) {
306+
func (r *StdReader) SetOffsetForConsumption(offset int64) {
307307
r.client.AddConsumePartitions(map[string]map[int32]kgo.Offset{
308308
r.topic: {r.partitionID: kgo.NewOffset().At(offset)},
309309
})
310310
}
311311

312312
// Commit commits an offset to the consumer group
313-
func (r *Reader) Commit(ctx context.Context, offset int64) error {
313+
func (r *StdReader) Commit(ctx context.Context, offset int64) error {
314314
admin := kadm.NewClient(r.client)
315315

316316
// Commit the last consumed offset.

pkg/kafka/partition/reader_service.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ type ReaderService struct {
5555
services.Service
5656

5757
cfg ReaderConfig
58-
reader ReaderIfc
58+
reader Reader
5959
consumerFactory ConsumerFactory
6060
logger log.Logger
6161
metrics *serviceMetrics
@@ -82,7 +82,7 @@ func NewReaderService(
8282
) (*ReaderService, error) {
8383

8484
// Create the reader
85-
reader, err := NewReader(
85+
reader, err := NewStdReader(
8686
kafkaCfg,
8787
partitionID,
8888
instanceID,
@@ -109,7 +109,7 @@ func NewReaderService(
109109

110110
func newReaderServiceFromIfc(
111111
cfg ReaderConfig,
112-
reader ReaderIfc,
112+
reader Reader,
113113
consumerFactory ConsumerFactory,
114114
logger log.Logger,
115115
reg prometheus.Registerer,

pkg/kafka/partition/reader_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ func readersFromKafkaCfg(
6363
kafkaCfg kafka.Config,
6464
consumerFactory ConsumerFactory,
6565
partition int32,
66-
) (ReaderIfc, *ReaderService) {
66+
) (Reader, *ReaderService) {
6767
partitionReader, err := NewReaderService(
6868
kafkaCfg,
6969
partition,

pkg/loki/modules.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -1808,7 +1808,7 @@ func (t *Loki) initBlockBuilder() (services.Service, error) {
18081808
return nil, fmt.Errorf("calculating block builder partition ID: %w", err)
18091809
}
18101810

1811-
reader, err := partition.NewReader(
1811+
reader, err := partition.NewStdReader(
18121812
t.Cfg.KafkaConfig,
18131813
ingestPartitionID,
18141814
id,

0 commit comments

Comments (0)