Skip to content

Commit

Permalink
Begin Scalable Single Binary example for docker-compose
Browse files Browse the repository at this point in the history
  • Loading branch information
zalegrala committed Oct 6, 2021
1 parent f73f0ff commit fecc219
Show file tree
Hide file tree
Showing 5 changed files with 269 additions and 0 deletions.
98 changes: 98 additions & 0 deletions example/docker-compose/scalable-single-binary/docker-compose.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
# docker-compose stack: three Tempo replicas joined via memberlist, backed by
# MinIO (S3-compatible), with Prometheus, Grafana, a synthetic load generator
# and the vulture for end-to-end verification.
version: "3"
services:

  tempo1:
    image: grafana/tempo:latest
    # NOTE(review): the readme's `docker-compose ps` output shows all three
    # tempo containers as `exited (1)` with `-target=ha`; Tempo's multi-instance
    # single-binary target is named "scalable-single-binary" — confirm against
    # the Tempo version in use.
    command: "-target=scalable-single-binary -config.file=/etc/tempo.yaml"
    volumes:
      - ./tempo-scalable-single-binary.yaml:/etc/tempo.yaml
    ports:
      - "14268"  # jaeger ingest
      - "3200"   # tempo
      - "7946"   # memberlist
    networks:
      default:
        aliases:
          # All three replicas share the "tempo" alias so DNS round-robins
          # across them (used by querier frontend_address and Grafana).
          - tempo
    depends_on:
      - minio

  tempo2:
    image: grafana/tempo:latest
    command: "-target=scalable-single-binary -config.file=/etc/tempo.yaml"
    volumes:
      - ./tempo-scalable-single-binary.yaml:/etc/tempo.yaml
    ports:
      - "3200"   # tempo
      - "7946"   # memberlist
    networks:
      default:
        aliases:
          - tempo
    depends_on:
      - minio

  tempo3:
    image: grafana/tempo:latest
    command: "-target=scalable-single-binary -config.file=/etc/tempo.yaml"
    volumes:
      - ./tempo-scalable-single-binary.yaml:/etc/tempo.yaml
    ports:
      - "3200"   # tempo
      - "7946"   # memberlist
    networks:
      default:
        aliases:
          - tempo
    depends_on:
      - minio

  # Writes and reads back traces against tempo1 to verify the pipeline.
  vulture:
    image: grafana/tempo-vulture:latest
    command:
      - "-prometheus-listen-address=:3201"
      - "-tempo-push-url=http://tempo1"
      - "-tempo-query-url=http://tempo1:3200"
      - "-tempo-retention-duration=1h"
    ports:
      - "3201:3201"

  prometheus:
    image: prom/prometheus:latest
    command: ["--config.file=/etc/prometheus.yaml"]
    volumes:
      - ./prometheus.yaml:/etc/prometheus.yaml
    ports:
      - "9090:9090"

  grafana:
    image: grafana/grafana:8.1.6
    volumes:
      - ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
    environment:
      # Anonymous admin access — fine for a local example, never production.
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
      - GF_AUTH_DISABLE_LOGIN_FORM=true
    ports:
      - "3000:3000"

  minio:
    image: minio/minio:latest
    environment:
      - MINIO_ACCESS_KEY=tempo
      - MINIO_SECRET_KEY=supersecret
    ports:
      - "9000:9000"  # S3 API
      - "9001:9001"  # console
    entrypoint:
      - sh
      - -euc
      # Pre-create the bucket directory Tempo writes to, then start the server.
      - mkdir -p /data/tempo && /usr/bin/minio server /data --console-address ':9001'

  synthetic-load-generator:
    image: omnition/synthetic-load-generator:1.0.25
    volumes:
      - ../shared/load-generator.json:/etc/load-generator.json
    environment:
      - TOPOLOGY_FILE=/etc/load-generator.json
      # Pushes Jaeger-format traces to tempo1's jaeger ingest port.
      - JAEGER_COLLECTOR_URL=http://tempo1:14268
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Grafana datasource provisioning: Prometheus for metrics, Tempo for traces.
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    orgId: 1
    url: http://prometheus:9090
    basicAuth: false
    isDefault: false
    version: 1
    editable: false
  - name: Tempo
    type: tempo
    access: proxy
    orgId: 1
    # Queries go to tempo1 directly rather than the round-robin alias.
    url: http://tempo1:3200
    basicAuth: false
    isDefault: true
    version: 1
    editable: false
    apiVersion: 1
    # Stable uid so dashboards/links can reference this datasource by id.
    uid: tempo
18 changes: 18 additions & 0 deletions example/docker-compose/scalable-single-binary/prometheus.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# Prometheus scrape config for the scalable-single-binary example.
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  # Prometheus scraping itself.
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']
  # All three Tempo replicas expose metrics on their shared HTTP port.
  - job_name: 'tempo'
    static_configs:
      - targets:
          - 'tempo1:3200'
          - 'tempo2:3200'
          - 'tempo3:3200'
  - job_name: 'vulture'
    static_configs:
      - targets: ['vulture:3201']
58 changes: 58 additions & 0 deletions example/docker-compose/scalable-single-binary/readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
## Scalable Single Binary

In this example, Tempo is configured to write data to MinIO, which presents an S3-compatible API. Additionally, `memberlist` is enabled to demonstrate how a single binary can run all services and still make use of the cluster awareness that `memberlist` provides.

1. First start up the local stack.

```console
docker-compose up -d
```

At this point, the following containers should be spun up -

```console
docker-compose ps
```
```
NAME COMMAND SERVICE STATUS PORTS
scalable-single-binary-grafana-1 "/run.sh" grafana exited (1)
scalable-single-binary-minio-1 "sh -euc 'mkdir -p /…" minio running 0.0.0.0:9000-9001->9000-9001/tcp, :::9000-9001->9000-9001/tcp
scalable-single-binary-prometheus-1 "/bin/prometheus --c…" prometheus running 0.0.0.0:9090->9090/tcp, :::9090->9090/tcp
scalable-single-binary-synthetic-load-generator-1 "./start.sh" synthetic-load-generator running
scalable-single-binary-tempo1-1 "/tempo -target=ha -…" tempo1 exited (1)
scalable-single-binary-tempo2-1 "/tempo -target=ha -…" tempo2 exited (1)
scalable-single-binary-tempo3-1 "/tempo -target=ha -…" tempo3 exited (1)
scalable-single-binary-vulture-1 "/tempo-vulture -pro…" vulture running 0.0.0.0:3201->3201/tcp, :::3201->3201/tcp
```

2. If you're interested you can see the wal/blocks as they are being created. Navigate to minio at
http://localhost:9001 and use the username/password of `tempo`/`supersecret`.

3. The synthetic-load-generator is now printing out trace ids it's flushing into Tempo. To view its logs use -

```console
docker-compose logs -f synthetic-load-generator
```
```
synthetic-load-generator_1 | 20/10/24 08:27:09 INFO ScheduledTraceGenerator: Emitted traceId 57aedb829f352625 for service frontend route /product
synthetic-load-generator_1 | 20/10/24 08:27:09 INFO ScheduledTraceGenerator: Emitted traceId 25fa96b9da24b23f for service frontend route /cart
synthetic-load-generator_1 | 20/10/24 08:27:09 INFO ScheduledTraceGenerator: Emitted traceId 15b3ad814b77b779 for service frontend route /shipping
synthetic-load-generator_1 | 20/10/24 08:27:09 INFO ScheduledTraceGenerator: Emitted traceId 3803db7d7d848a1a for service frontend route /checkout
```

Logs are in the form

```
Emitted traceId <traceid> for service frontend route /cart
```

Copy one of these trace ids.

4. Navigate to [Grafana](http://localhost:3000/explore) and paste the trace id to request it from Tempo.
Also notice that you can query Tempo metrics from the Prometheus data source setup in Grafana.

5. To stop the setup use -

```console
docker-compose down -v
```
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
# Tempo configuration shared by all three single-binary replicas.
server:
  http_listen_port: 3200

search_enabled: true

distributor:
  receivers:       # this configuration will listen on all ports and protocols that tempo is capable of.
    jaeger:        # the receivers all come from the OpenTelemetry collector. more configuration information can
      protocols:   # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
        thrift_http:
        grpc:      # for a production deployment you should only enable the receivers you need!
        thrift_binary:
        thrift_compact:
    zipkin:
    otlp:
      protocols:
        http:
        grpc:
    opencensus:

ingester:
  trace_idle_period: 10s       # the length of time after a trace has not received spans to consider it complete and flush it
  max_block_bytes: 1_000_000   # cut the head block when it hits this size or ...
  max_block_duration: 5m       # this much time passes
  lifecycler:
    ring:
      kvstore:
        store: memberlist
      # Each block is replicated to 3 ingesters — matches the replica count.
      replication_factor: 3
    # NOTE(review): flattened source made nesting ambiguous; heartbeat_period
    # is a lifecycler-level option — confirm against Tempo's config reference.
    heartbeat_period: 100ms

compactor:
  compaction:
    compaction_window: 1h            # blocks in this time window will be compacted together
    max_block_bytes: 100_000_000     # maximum size of compacted blocks
    block_retention: 1h
    compacted_block_retention: 10m

memberlist:
  abort_if_cluster_join_fails: false
  bind_port: 7946
  join_members:
    - tempo1:7946
    - tempo2:7946
    - tempo3:7946

storage:
  trace:
    backend: s3                          # backend configuration to use
    block:
      bloom_filter_false_positive: .05   # bloom filter false positive rate. lower values create larger filters but fewer false positives
      index_downsample_bytes: 1000       # number of bytes per index record
      encoding: zstd                     # block encoding/compression. options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd, s2
    wal:
      path: /tmp/tempo/wal               # where to store the wal locally
      encoding: snappy                   # wal encoding/compression. options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd, s2
    s3:
      bucket: tempo                      # how to store data in s3
      endpoint: minio:9000
      access_key: tempo
      secret_key: supersecret
      insecure: true
      # For using AWS, select the appropriate regional endpoint and region
      # endpoint: s3.dualstack.us-west-2.amazonaws.com
      # region: us-west-2
    pool:
      max_workers: 100                   # worker pool determines the number of parallel requests to the object store backend
      queue_depth: 10000

querier:
  frontend_worker:
    # "tempo" is the shared network alias — DNS round-robins across replicas.
    frontend_address: tempo:9095

0 comments on commit fecc219

Please sign in to comment.