# docker-compose.yml
version: '3'
networks:
  backend:
    driver: ${NETWORKS_DRIVER}
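# Every ${...} placeholder in this file is resolved from a .env file kept next to it.
# The block below is an illustrative .env sketch (assumed values, not part of this repo);
# adjust versions, directories and ports to your environment.
#
#   NETWORKS_DRIVER=bridge
#   TZ=Asia/Shanghai
#   ETCD_VERSION=3.5.9
#   ETCD_DIR=./data/etcd
#   ETCD_PORT=2379
#   MYSQL_VERSION=8.0
#   MYSQL_DIR=./data/mysql
#   MYSQL_PORT=3306
#   MYSQL_USERNAME=app
#   MYSQL_PASSWORD=change-me
#   MYSQL_ROOT_PASSWORD=change-me
#   REDIS_VERSION=7.0
#   REDIS_DIR=./data/redis
#   REDIS_PORT=6379
#   MONGO_VERSION=6.0
#   MONGO_DIR=./data/mongo
#   MONGO_PORT=27017
#   MONGO_USERNAME=admin
#   MONGO_PASSWORD=change-me
#   ZK_DIR=./data/zookeeper
#   ZK_PORT=2181
#   KAFKA_DIR=./data/kafka
#   KAFKA_PORT_1=9092
#   KAFKA_PORT_2=9093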
services:
  etcd:
    container_name: etcd
    image: bitnami/etcd:${ETCD_VERSION}
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
    environment:
      - ALLOW_NONE_AUTHENTICATION=yes
    privileged: true
    volumes:
      - ${ETCD_DIR}/data:/bitnami/etcd/data
    ports:
      - ${ETCD_PORT}:2379
      - 2380:2380
  mysql:
    image: mysql:${MYSQL_VERSION}
    container_name: mysql
    restart: always
    environment:
      - TZ=${TZ}
      - MYSQL_USER=${MYSQL_USERNAME} # MySQL application user name
      - MYSQL_PASSWORD=${MYSQL_PASSWORD} # MySQL application user password
      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} # MySQL root password
    privileged: true
    volumes:
      - ${MYSQL_DIR}/data:/var/lib/mysql # mount the host directory from MYSQL_DIR in .env to /var/lib/mysql inside the container
      - ${MYSQL_DIR}/conf:/etc/mysql/conf.d/
      - ${MYSQL_DIR}/logs:/logs
    ports:
      - "${MYSQL_PORT}:3306" # map container port 3306 to the host port from .env
    networks:
      - backend
  redis:
    image: redis:${REDIS_VERSION}
    container_name: redis
    environment:
      - TZ=${TZ}
    privileged: true
    volumes:
      - ${REDIS_DIR}/data:/data # mount the host directory holding Redis data into the container
      - ${REDIS_DIR}/conf/redis.conf:/etc/redis/redis.conf
    ports:
      - "${REDIS_PORT}:6379"
    restart: always
    networks:
      - backend
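  # A minimal sketch of the redis.conf mounted above (illustrative values, not shipped with
  # this file). Note that the official redis image only loads a mounted config when the
  # service also sets a command such as `command: redis-server /etc/redis/redis.conf`.
  #
  #   bind 0.0.0.0
  #   protected-mode no
  #   appendonly yes
  #   requirepass change-me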
  mongo:
    image: mongo:${MONGO_VERSION}
    container_name: mongo
    restart: always
    ports:
      - "${MONGO_PORT}:27017"
    volumes:
      - ${MONGO_DIR}/db:/data/db # mount the data directory
      - ${MONGO_DIR}/log:/var/log/mongodb # mount the log directory
    privileged: true # give the container extended privileges
    environment:
      MONGO_INITDB_ROOT_USERNAME: ${MONGO_USERNAME}
      MONGO_INITDB_ROOT_PASSWORD: ${MONGO_PASSWORD}
    networks:
      - backend
  zookeeper:
    image: docker.io/bitnami/zookeeper:3.8
    container_name: zookeeper
    ports:
      - "${ZK_PORT}:2181"
    volumes:
      - ${ZK_DIR}/bitnami:/bitnami
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes # allow anonymous logins
    networks:
      - backend
  kafka:
    image: docker.io/bitnami/kafka:3.4
    container_name: kafka
    ports: # remember to open these ports in the firewall
      - "${KAFKA_PORT_1}:9092"
      - "${KAFKA_PORT_2}:9093"
    volumes:
      - ${KAFKA_DIR}/bitnami:/bitnami
    environment:
      - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
      - ALLOW_PLAINTEXT_LISTENER=yes # allow listeners without TLS/SASL
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CLIENT:PLAINTEXT,EXTERNAL:PLAINTEXT # security protocol per listener: internal (CLIENT) and external (EXTERNAL) access
      - KAFKA_CFG_LISTENERS=CLIENT://:9092,EXTERNAL://0.0.0.0:9093 # addresses the broker binds to
      - KAFKA_CFG_ADVERTISED_LISTENERS=CLIENT://kafka:9092,EXTERNAL://192.168.0.101:9093 # addresses published to clients; replace 192.168.0.101 with your own host IP
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=CLIENT
    depends_on:
      - zookeeper
    networks:
      - backend
  # Collect business data
  # filebeat:
  #   image: elastic/filebeat:8.15.0
  #   container_name: filebeat
  #   environment:
  #     TZ: Asia/Shanghai
  #   user: root
  #   restart: always
  #   entrypoint: "filebeat -e -strict.perms=false" # work around config file permission checks
  #   volumes:
  #     - ./deploy/filebeat/conf/filebeat.yml:/usr/share/filebeat/filebeat.yml
  #     # The docker containers directory must be specified here; it depends on your docker installation.
  #     # For example, with a Snap-installed docker it is /var/snap/docker/common/var-lib-docker/containers:
  #     # - /var/snap/docker/common/var-lib-docker/containers:/var/lib/docker/containers
  #     - /var/lib/docker/containers:/var/lib/docker/containers
  #   depends_on:
  #     - kafka
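  # A sketch of the filebeat.yml referenced above (./deploy/filebeat/conf/filebeat.yml),
  # assuming it tails docker container logs and ships them to the kafka service defined
  # in this file; the topic name "app-log" is a placeholder, not something this repo defines.
  #
  #   filebeat.inputs:
  #     - type: container
  #       paths:
  #         - /var/lib/docker/containers/*/*.log
  #
  #   output.kafka:
  #     hosts: ["kafka:9092"]
  #     topic: "app-log"
  #     required_acks: 1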
  # Consume the data Filebeat pushes to Kafka and write it to Elasticsearch
  # go-stash:
  #   image: kevinwan/go-stash:1.0-arm64
  #   container_name: go-stash
  #   environment:
  #     TZ: Asia/Shanghai
  #   user: root
  #   restart: always
  #   volumes:
  #     - ./deploy/go-stash/etc:/app/etc
  #   depends_on:
  #     - elasticsearch
  #     - kafka
  # Store business logs from Kafka and Prometheus monitoring data
  # elasticsearch:
  #   image: docker.elastic.co/elasticsearch/elasticsearch:7.13.4
  #   container_name: elasticsearch
  #   user: root
  #   environment:
  #     - discovery.type=single-node
  #     - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
  #     - TZ=Asia/Shanghai
  #   volumes:
  #     - ./data/elasticsearch/data:/usr/share/elasticsearch/data
  #   restart: always
  #   ports:
  #     - 9200:9200
  #     - 9300:9300
  # Browse Elasticsearch data
  # kibana:
  #   image: docker.elastic.co/kibana/kibana:7.13.4
  #   container_name: kibana
  #   environment:
  #     - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
  #     - TZ=Asia/Shanghai
  #   restart: always
  #   ports:
  #     - "5601:5601"
  #   depends_on:
  #     - elasticsearch