Support no secret for s3/ceph #1817

Merged · 7 commits · Mar 4, 2020
Changes from all commits
29 changes: 20 additions & 9 deletions charts/tidb-backup/templates/scripts/_start_backup.sh.tpl
@@ -89,13 +89,24 @@ uploader \
{{- end }}

{{- if .Values.s3 }}
- uploader \
- --cloud=aws \
- --region={{ .Values.s3.region }} \
- {{- if .Values.s3.prefix }}
- --bucket={{ .Values.s3.bucket }}/{{ .Values.s3.prefix }} \
- {{- else }}
- --bucket={{ .Values.s3.bucket }} \
- {{- end }}
- --backup-dir=${dirname}
+ # Once we know there are no more credentials that will be logged we can run with -x
+ set -x
+ bucket={{ .Values.s3.bucket }}
+
+ cat <<EOF > /tmp/rclone.conf
+ [s3]
+ type = s3
+ provider = AWS
+ env_auth = true
+ region = {{ .Values.s3.region }}
+ EOF
+
+ cd "${backup_base_dir}"
+ {{- if .Values.s3.prefix }}
+ tar -cf - "${backup_name}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat s3:${bucket}/{{ .Values.s3.prefix }}/${backup_name}/${backup_name}.tgz
+ {{- else }}
+ tar -cf - "${backup_name}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat s3:${bucket}/${backup_name}/${backup_name}.tgz
+ {{- end }}
{{- end }}
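
The piece that makes the "no secret" case work is env_auth = true in the generated rclone config: rclone's S3 backend reads AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when the chart injects them from a secret, and otherwise falls back to the ambient credential chain (EC2 instance profile / IAM role). Below is a minimal standalone sketch of the same flow for trying it outside the chart; the region, bucket, and directory names are placeholders, not values from this PR.

# Reproduce the config the template writes. With env_auth = true, rclone
# uses AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY if they are set, or falls
# back to instance-profile credentials when no keys are present.
cat <<EOF > /tmp/rclone.conf
[s3]
type = s3
provider = AWS
env_auth = true
region = us-west-2
EOF

bucket=my-backup-bucket    # placeholder bucket name

# Sanity check that credentials resolve: list the bucket.
rclone --config /tmp/rclone.conf lsd s3:${bucket}

# Upload a directory the same way the backup script does: tar it, compress
# with pigz, and stream into rclone rcat, which writes stdin straight to S3
# without creating a local archive first.
tar -cf - my-backup-dir | pigz -p 16 \
  | rclone --config /tmp/rclone.conf rcat s3:${bucket}/my-backup-dir/my-backup-dir.tgz
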
2 changes: 1 addition & 1 deletion charts/tidb-backup/values.yaml
@@ -27,7 +27,7 @@ name: fullbackup-{{ date "200601021504" .Release.Time }}
image:
pullPolicy: IfNotPresent
# https://github.com/pingcap/tidb-cloud-backup
- backup: pingcap/tidb-cloud-backup:20191217
+ backup: pingcap/tidb-cloud-backup:20200229

## nodeSelector ensure pods only assigning to nodes which have each of the indicated key-value pairs as labels
## ref:https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
(new file, name not captured in this view)
@@ -74,7 +74,7 @@ spec:
- name: GOOGLE_APPLICATION_CREDENTIALS
value: /gcp/credentials.json
{{- end }}
- {{- if or .Values.scheduledBackup.ceph .Values.scheduledBackup.s3 }}
+ {{- if or .Values.scheduledBackup.ceph.secretName .Values.scheduledBackup.s3.secretName }}
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
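
The guard above is the companion change: the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY env vars are now injected only when a secretName is actually set for ceph or s3, instead of whenever the ceph/s3 block exists at all. Without a secretName the container starts with no credential env vars and rclone relies on the node's IAM role. A rough way to check the rendered manifest locally; the chart path and values here are illustrative, not taken from this PR.

# Render the chart without a secretName and confirm no AWS credential env
# vars are emitted (grep prints nothing and exits non-zero when there is no match).
helm template charts/tidb-cluster \
  --set scheduledBackup.create=true \
  --set scheduledBackup.s3.region=us-west-2 \
  --set scheduledBackup.s3.bucket=my-backup-bucket \
  | grep AWS_ACCESS_KEY_ID || echo "no credential env vars rendered"

# Render again with a secretName and the env vars reappear.
helm template charts/tidb-cluster \
  --set scheduledBackup.create=true \
  --set scheduledBackup.s3.region=us-west-2 \
  --set scheduledBackup.s3.bucket=my-backup-bucket \
  --set scheduledBackup.s3.secretName=s3-backup-secret \
  | grep AWS_ACCESS_KEY_ID
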
(new file, name not captured in this view)
@@ -3,7 +3,8 @@ set -euo pipefail
host=$(getent hosts {{ template "cluster.name" . }}-tidb | head | awk '{print $1}')

backupName=scheduled-backup-`date "+%Y%m%d-%H%M%S"`
- backupPath=/data/${backupName}
+ backupBase=/data
+ backupPath=${backupBase}/${backupName}

echo "making dir ${backupPath}"
mkdir -p ${backupPath}
@@ -37,10 +38,29 @@ echo "Reset TiKV GC life time to ${gc_life_time}"
/usr/bin/mysql -h${host} -P4000 -u${TIDB_USER} ${password_str} -Nse "select variable_name,variable_value from mysql.tidb where variable_name='tikv_gc_life_time';"

{{- if .Values.scheduledBackup.gcp }}
- uploader \
- --cloud=gcp \
- --bucket={{ .Values.scheduledBackup.gcp.bucket }} \
- --backup-dir=${backupPath}
+ # Once we know there are no more credentials that will be logged we can run with -x
+ set -x
+ bucket={{ .Values.scheduledBackup.gcp.bucket }}
+ creds=${GOOGLE_APPLICATION_CREDENTIALS:-""}
+ if ! [[ -z $creds ]] ; then
+ creds="service_account_file = ${creds}"
+ fi
+
+ cat <<EOF > /tmp/rclone.conf
+ [gcp]
+ type = google cloud storage
+ bucket_policy_only = true
+ $creds
+ EOF
+
+ cd "${backupBase}"
+ {{- if .Values.scheduledBackup.gcp.prefix }}
+ tar -cf - "${backupName}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat gcp:${bucket}/{{ .Values.scheduledBackup.gcp.prefix }}/${backupName}/${backupName}.tgz
+ {{- else }}
+ tar -cf - "${backupName}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat gcp:${bucket}/${backupName}/${backupName}.tgz
+ {{- end }}
{{- end }}

{{- if .Values.scheduledBackup.ceph }}
@@ -52,11 +72,26 @@ uploader \
{{- end }}

{{- if .Values.scheduledBackup.s3 }}
- uploader \
- --cloud=aws \
- --region={{ .Values.scheduledBackup.s3.region }} \
- --bucket={{ .Values.scheduledBackup.s3.bucket }} \
- --backup-dir=${backupPath}
+ # Once we know there are no more credentials that will be logged we can run with -x
+ set -x
+ bucket={{ .Values.scheduledBackup.s3.bucket }}
+
+ cat <<EOF > /tmp/rclone.conf
+ [s3]
+ type = s3
+ provider = AWS
+ env_auth = true
+ region = {{ .Values.scheduledBackup.s3.region }}
+ EOF
+
+ cd "${backupBase}"
+ {{- if .Values.scheduledBackup.s3.prefix }}
+ tar -cf - "${backupName}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat s3:${bucket}/{{ .Values.scheduledBackup.s3.prefix }}/${backupName}/${backupName}.tgz
+ {{- else }}
+ tar -cf - "${backupName}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat s3:${bucket}/${backupName}/${backupName}.tgz
+ {{- end }}
{{- end }}

{{- if and (.Values.scheduledBackup.cleanupAfterUpload) (or (.Values.scheduledBackup.gcp) (or .Values.scheduledBackup.ceph .Values.scheduledBackup.s3)) }}
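
The GCS branch of the scheduled-backup script mirrors the S3 change: when GOOGLE_APPLICATION_CREDENTIALS is set (a credentials secret is mounted) the script adds a service_account_file line to the rclone config, and when it is unset the line is omitted and rclone's google cloud storage backend authenticates with the instance's default service account. A standalone sketch of that conditional, with a placeholder bucket name.

# Build the [gcp] remote the same way the scheduled-backup template does.
creds=${GOOGLE_APPLICATION_CREDENTIALS:-""}
if ! [[ -z $creds ]] ; then
  # Point rclone at the mounted key file only when one exists.
  creds="service_account_file = ${creds}"
fi

cat <<EOF > /tmp/rclone.conf
[gcp]
type = google cloud storage
bucket_policy_only = true
$creds
EOF

# With no key file this relies on the node's default service account;
# with one, rclone authenticates from the mounted credentials file.
rclone --config /tmp/rclone.conf lsd gcp:my-backup-bucket
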
4 changes: 3 additions & 1 deletion charts/tidb-cluster/values.yaml
@@ -700,7 +700,7 @@ binlog:
scheduledBackup:
create: false
# https://github.com/pingcap/tidb-cloud-backup
- mydumperImage: pingcap/tidb-cloud-backup:20191217
+ mydumperImage: pingcap/tidb-cloud-backup:20200229
mydumperImagePullPolicy: IfNotPresent
# storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
# different classes might map to quality-of-service levels, or to backup policies,
@@ -741,6 +741,7 @@ scheduledBackup:
# backup to gcp
gcp: {}
# bucket: ""
+ # prefix: ""
# secretName is the name of the secret which stores the gcp service account credentials json file
# The service account must have read/write permission to the above bucket.
# Read the following document to create the service account and download the credentials file as credentials.json:
@@ -761,6 +762,7 @@
s3: {}
# region: ""
# bucket: ""
+ # prefix: ""
# secretName is the name of the secret which stores s3 object store access key and secret key
# You can create the secret by:
# kubectl create secret generic s3-backup-secret --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
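
With secretName optional, a scheduled backup to S3 can be configured with only region, bucket, and an optional prefix, provided the nodes (or the pod, via an IAM role mechanism) already carry credentials for the bucket. An illustrative install command follows; the release name, chart path, and values are placeholders, not part of this PR.

# Enable scheduled backups without creating any credential secret; the
# backup pod authenticates through the instance/IAM role via env_auth.
helm upgrade --install my-tidb-cluster charts/tidb-cluster \
  --set scheduledBackup.create=true \
  --set scheduledBackup.s3.region=us-west-2 \
  --set scheduledBackup.s3.bucket=my-backup-bucket \
  --set scheduledBackup.s3.prefix=my-cluster/scheduled
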