Fixes all lint errors. (#2768)
Signed-off-by: Cyril Tovena <[email protected]>
cyriltovena authored Oct 15, 2020
1 parent 607ccc4 commit 62272cd
Showing 49 changed files with 248 additions and 247 deletions.
3 changes: 0 additions & 3 deletions cmd/loki/main.go
@@ -5,7 +5,6 @@ import (
"fmt"
"os"
"reflect"
- "strings"

"github.com/cortexproject/cortex/pkg/util/flagext"
"github.com/go-kit/kit/log/level"
@@ -28,8 +27,6 @@ func init() {
prometheus.MustRegister(version.NewCollector("loki"))
}

- var lineReplacer = strings.NewReplacer("\n", "\\n ")
-
type Config struct {
loki.Config `yaml:",inline"`
printVersion bool
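The main.go hunks only delete the unused lineReplacer variable, and the "strings" import goes with it: once nothing references the package, the compiler itself rejects the unused import. A minimal sketch of the same situation (illustrative, not the actual Loki file):

    package main

    import (
        "fmt"
        "strings" // deleting lineReplacer below would leave this import
        // unused, and the compiler fails with: imported and not used: "strings"
    )

    // An unused package-level variable still compiles; only linters
    // (for example staticcheck's "unused" check) report it.
    var lineReplacer = strings.NewReplacer("\n", "\\n ")

    func main() {
        fmt.Println("ok")
    }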
2 changes: 1 addition & 1 deletion pkg/canary/reader/reader.go
@@ -434,6 +434,6 @@ func nextBackoff(w io.Writer, statusCode int, backoff *util.Backoff) time.Time {
} else {
next = time.Now().Add(backoff.NextDelay())
}
- fmt.Fprintf(w, "Loki returned an error code: %v, waiting %v before next query.", statusCode, next.Sub(time.Now()))
+ fmt.Fprintf(w, "Loki returned an error code: %v, waiting %v before next query.", statusCode, time.Until(next))
return next
}
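The reader.go fix replaces next.Sub(time.Now()) with time.Until(next); both produce the same time.Duration, and the latter is the simplification gosimple's S1024 check suggests. A small self-contained sketch (the deadline value is made up):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        deadline := time.Now().Add(90 * time.Second)

        // The hand-written form that the lint flags.
        d1 := deadline.Sub(time.Now())

        // Equivalent and clearer; time.Since is the counterpart for past times.
        d2 := time.Until(deadline)

        fmt.Println(d1.Round(time.Second), d2.Round(time.Second)) // both ~1m30s
    }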
4 changes: 2 additions & 2 deletions pkg/distributor/distributor.go
@@ -263,8 +263,8 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}

tracker := pushTracker{
- done: make(chan struct{}),
- err: make(chan error),
+ done: make(chan struct{}),
+ err:  make(chan error),
}
tracker.samplesPending.Store(int32(len(streams)))
for ingester, samples := range samplesByIngester {
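The distributor.go hunk changes no behaviour; it only lets gofmt re-align the struct-literal values, padding the shorter key so both values start in the same column. A reduced, hypothetical version of the literal:

    package main

    import "fmt"

    type pushTracker struct {
        done chan struct{}
        err  chan error
    }

    func main() {
        // gofmt pads "err:" with an extra space so the values line up.
        tracker := pushTracker{
            done: make(chan struct{}),
            err:  make(chan error),
        }
        fmt.Println(tracker.done != nil, tracker.err != nil) // true true
    }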
2 changes: 1 addition & 1 deletion pkg/ingester/flush.go
@@ -307,7 +307,7 @@ func (i *Ingester) removeFlushedChunks(instance *instance, stream *stream) {
}
}

- func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs labels.Labels, cs []*chunkDesc, streamsMtx *sync.RWMutex) error {
+ func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs labels.Labels, cs []*chunkDesc, streamsMtx sync.Locker) error {
userID, err := user.ExtractOrgID(ctx)
if err != nil {
return err
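flushChunks now takes the sync.Locker interface rather than the concrete *sync.RWMutex, asking only for the Lock/Unlock behaviour it uses. A standalone sketch (flush and its arguments are invented for illustration):

    package main

    import (
        "fmt"
        "sync"
    )

    // Accepting sync.Locker keeps the signature minimal; both *sync.Mutex
    // and *sync.RWMutex satisfy it.
    func flush(streamsMtx sync.Locker, chunks []string) {
        streamsMtx.Lock()
        defer streamsMtx.Unlock()
        fmt.Println("flushing", len(chunks), "chunks")
    }

    func main() {
        var rw sync.RWMutex
        var mu sync.Mutex
        flush(&rw, []string{"a", "b"}) // RWMutex: Lock/Unlock take the write lock
        flush(&mu, []string{"c"})
    }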
2 changes: 1 addition & 1 deletion pkg/ingester/instance.go
@@ -531,4 +531,4 @@ func shouldConsiderStream(stream *stream, req *logproto.SeriesRequest) bool {
return true
}
return false
- }
+ }
2 changes: 1 addition & 1 deletion pkg/ingester/transfer.go
@@ -244,7 +244,7 @@ func (i *Ingester) transferOut(ctx context.Context) error {
return err
}

- chunks := make([]*logproto.Chunk, 1, 1)
+ chunks := make([]*logproto.Chunk, 1)
chunks[0] = &logproto.Chunk{
Data: bb,
}
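In transfer.go the capacity argument to make is dropped: make(s, n, n) and make(s, n) build identical slices, and the redundant capacity is the kind of thing gosimple's S1019 check flags. A quick sketch:

    package main

    import "fmt"

    func main() {
        // Redundant: the capacity equals the length.
        a := make([]int, 1, 1)

        // Same slice, shorter spelling.
        b := make([]int, 1)

        fmt.Println(len(a), cap(a), len(b), cap(b)) // 1 1 1 1
    }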
5 changes: 2 additions & 3 deletions pkg/logcli/output/default_test.go
@@ -83,9 +83,8 @@ func TestDefaultOutput_Format(t *testing.T) {

t.Run(testName, func(t *testing.T) {
t.Parallel()
-
writer := &bytes.Buffer{}
- out := &DefaultOutput{writer,testData.options}
+ out := &DefaultOutput{writer, testData.options}
out.FormatAndPrintln(testData.timestamp, testData.lbls, testData.maxLabelsLen, testData.line)

assert.Equal(t, testData.expected, writer.String())
@@ -114,7 +113,7 @@ func TestDefaultOutput_FormatLabelsPadding(t *testing.T) {
maxLabelsLen := findMaxLabelsLength(labelsList)
options := &LogOutputOptions{Timezone: time.UTC, NoLabels: false}
writer := &bytes.Buffer{}
- out := &DefaultOutput{writer,options}
+ out := &DefaultOutput{writer, options}

// Format the same log line with different labels
formattedEntries := make([]string, 0, len(labelsList))
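This hunk and the matching ones in jsonl_test.go, output_test.go, and raw_test.go below are pure gofmt fixes: a space is required after the comma in composite literals and argument lists. A throwaway example of the rewrite:

    package main

    import "fmt"

    type output struct {
        writer  fmt.Stringer
        options string
    }

    func main() {
        // gofmt rewrites output{nil,"default"} to output{nil, "default"}.
        out := output{nil, "default"}
        fmt.Println(out.options)
    }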
2 changes: 1 addition & 1 deletion pkg/logcli/output/jsonl_test.go
@@ -68,7 +68,7 @@ func TestJSONLOutput_Format(t *testing.T) {
t.Run(testName, func(t *testing.T) {
t.Parallel()
writer := &bytes.Buffer{}
- out := &JSONLOutput{writer,testData.options}
+ out := &JSONLOutput{writer, testData.options}
out.FormatAndPrintln(testData.timestamp, testData.lbls, testData.maxLabelsLen, testData.line)

actual := writer.String()
2 changes: 1 addition & 1 deletion pkg/logcli/output/output.go
@@ -67,7 +67,7 @@ func NewLogOutput(w io.Writer, mode string, options *LogOutputOptions) (LogOutpu

func getColor(labels string) *color.Color {
hash := fnv.New32()
- hash.Write([]byte(labels))
+ _, _ = hash.Write([]byte(labels))
id := hash.Sum32() % uint32(len(colorList))
color := colorList[id]
return color
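In output.go the return values of hash.Write are now explicitly discarded. hash.Hash documents that Write never returns an error, but errcheck-style linters still want the returns acknowledged, and the blank assignments do that without changing behaviour. A sketch of the pattern (colorID and its arguments are made up for the example):

    package main

    import (
        "fmt"
        "hash/fnv"
    )

    func colorID(labels string, buckets uint32) uint32 {
        h := fnv.New32()
        // Write on a hash.Hash never returns an error, so the results are
        // deliberately ignored to keep errcheck quiet.
        _, _ = h.Write([]byte(labels))
        return h.Sum32() % buckets
    }

    func main() {
        fmt.Println(colorID(`{job="loki"}`, 8))
    }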
14 changes: 7 additions & 7 deletions pkg/logcli/output/output_test.go
@@ -10,19 +10,19 @@ import (
func TestNewLogOutput(t *testing.T) {
options := &LogOutputOptions{time.UTC, false, false}

- out, err := NewLogOutput(nil,"default", options)
+ out, err := NewLogOutput(nil, "default", options)
assert.NoError(t, err)
- assert.IsType(t, &DefaultOutput{nil,options}, out)
+ assert.IsType(t, &DefaultOutput{nil, options}, out)

- out, err = NewLogOutput(nil,"jsonl", options)
+ out, err = NewLogOutput(nil, "jsonl", options)
assert.NoError(t, err)
- assert.IsType(t, &JSONLOutput{nil,options}, out)
+ assert.IsType(t, &JSONLOutput{nil, options}, out)

- out, err = NewLogOutput(nil,"raw", options)
+ out, err = NewLogOutput(nil, "raw", options)
assert.NoError(t, err)
- assert.IsType(t, &RawOutput{nil,options}, out)
+ assert.IsType(t, &RawOutput{nil, options}, out)

- out, err = NewLogOutput(nil,"unknown", options)
+ out, err = NewLogOutput(nil, "unknown", options)
assert.Error(t, err)
assert.Nil(t, out)
}
4 changes: 2 additions & 2 deletions pkg/logcli/output/raw.go
@@ -14,9 +14,9 @@ type RawOutput struct {
options *LogOutputOptions
}

- func NewRaw (writer io.Writer, options *LogOutputOptions) LogOutput {
+ func NewRaw(writer io.Writer, options *LogOutputOptions) LogOutput {
return &RawOutput{
- w: writer,
+ w:       writer,
options: options,
}
}
2 changes: 1 addition & 1 deletion pkg/logcli/output/raw_test.go
@@ -67,7 +67,7 @@ func TestRawOutput_Format(t *testing.T) {
t.Parallel()

writer := &bytes.Buffer{}
- out := &RawOutput{writer,testData.options}
+ out := &RawOutput{writer, testData.options}
out.FormatAndPrintln(testData.timestamp, testData.lbls, testData.maxLabelsLen, testData.line)

assert.Equal(t, testData.expected, writer.String())
6 changes: 3 additions & 3 deletions pkg/logcli/query/query.go
@@ -84,7 +84,7 @@ func (q *Query) DoQuery(c client.Client, out output.LogOutput, statistics bool)
if q.Limit < q.BatchSize {
q.BatchSize = q.Limit
}
- resultLength := q.BatchSize
+ resultLength := 0
total := 0
start := q.Start
end := q.End
@@ -114,7 +114,7 @@ func (q *Query) DoQuery(c client.Client, out output.LogOutput, statistics bool)
break
}
// Also no result, wouldn't expect to hit this.
- if lastEntry == nil || len(lastEntry) == 0 {
+ if len(lastEntry) == 0 {
break
}
// Can only happen if all the results return in one request
@@ -321,7 +321,7 @@ func (q *Query) printStream(streams loghttp.Streams, out output.LogOutput, lastE
printed := 0
for _, e := range allEntries {
// Skip the last entry if it overlaps, this happens because batching includes the last entry from the last batch
- if lastEntry != nil && len(lastEntry) > 0 && e.entry.Timestamp == lastEntry[0].Timestamp {
+ if len(lastEntry) > 0 && e.entry.Timestamp == lastEntry[0].Timestamp {
skip := false
// Because many logs can share a timestamp in the unlucky event a batch ends with a timestamp
// shared by multiple entries we have to check all that were stored to see if we've already
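The query.go conditions drop explicit nil checks: len of a nil slice is 0, so lastEntry == nil || len(lastEntry) == 0 reduces to len(lastEntry) == 0, and lastEntry != nil && len(lastEntry) > 0 reduces to len(lastEntry) > 0 (the shape gosimple's S1009 check suggests). The earlier resultLength change simply starts the counter at 0, presumably because the initial value was always overwritten before being read. A minimal sketch of the slice simplification:

    package main

    import "fmt"

    func main() {
        var lastEntry []string // nil slice

        // len of a nil slice is 0, so the nil check adds nothing.
        if len(lastEntry) == 0 {
            fmt.Println("no previous entries")
        }

        lastEntry = append(lastEntry, "first line")

        // Likewise len(lastEntry) > 0 already implies lastEntry != nil.
        if len(lastEntry) > 0 {
            fmt.Println("last entry:", lastEntry[0])
        }
    }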