Skip to content

Commit

Permalink
Fixes to backup scripts
Browse the repository at this point in the history
  • Loading branch information
tirsen committed Feb 29, 2020
1 parent 6729b99 commit 0301a42
Show file tree
Hide file tree
Showing 3 changed files with 46 additions and 10 deletions.
2 changes: 1 addition & 1 deletion charts/tidb-backup/templates/scripts/_start_backup.sh.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ cat <<EOF > /tmp/rclone.conf
type = s3
provider = AWS
env_auth = true
region = us-west-2
region = {{ .Values.s3.region }}
EOF

cd "${backup_base_dir}"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,10 +37,29 @@ echo "Reset TiKV GC life time to ${gc_life_time}"
/usr/bin/mysql -h${host} -P4000 -u${TIDB_USER} ${password_str} -Nse "select variable_name,variable_value from mysql.tidb where variable_name='tikv_gc_life_time';"

{{- if .Values.scheduledBackup.gcp }}
# NOTE(review): this span is scraped from a unified diff whose +/- markers were
# lost. The `uploader` invocation below is the removed (old) implementation and
# the rclone pipeline after it is the added (new) one — they are alternatives,
# not sequential steps. Confirm against the original commit before running.
uploader \
--cloud=gcp \
--bucket={{ .Values.scheduledBackup.gcp.bucket }} \
--backup-dir=${backupPath}
# Once we know there are no more credentials that will be logged we can run with -x
# NOTE(review): the comment above says -x should wait, yet `set -x` is enabled
# immediately. GOOGLE_APPLICATION_CREDENTIALS is a file *path*, so only the
# path (not the key material) is traced — presumably acceptable, but verify.
set -x
bucket={{ .Values.scheduledBackup.gcp.bucket }}
# Empty when no key file is mounted; rclone then falls back to ambient
# (instance/workload) credentials.
creds=${GOOGLE_APPLICATION_CREDENTIALS:-""}
if ! [[ -z $creds ]] ; then
# Turn the mounted key-file path into an rclone config line.
creds="service_account_file = ${creds}"
fi

# Write a minimal rclone remote named "gcp". $creds expands to either the
# service_account_file line or an empty line inside the generated config.
cat <<EOF > /tmp/rclone.conf
[gcp]
type = google cloud storage
bucket_policy_only = true
$creds
EOF

cd "${backupPath}"
{{- if .Values.scheduledBackup.gcp.prefix }}
# Stream the backup dir as a tarball, compress with pigz (16 threads), and
# upload via `rclone rcat` under the configured object-key prefix.
tar -cf - "${backup_name}" | pigz -p 16 \
| rclone --config /tmp/rclone.conf rcat gcp:${bucket}/{{ .Values.scheduledBackup.gcp.prefix }}/${backup_name}/${backup_name}.tgz
{{- else }}
# Same pipeline without a key prefix.
tar -cf - "${backup_name}" | pigz -p 16 \
| rclone --config /tmp/rclone.conf rcat gcp:${bucket}/${backup_name}/${backup_name}.tgz
{{- end }}
{{- end }}

{{- if .Values.scheduledBackup.ceph }}
Expand All @@ -52,11 +71,26 @@ uploader \
{{- end }}

{{- if .Values.scheduledBackup.s3 }}
# NOTE(review): diff residue — the `uploader` invocation below is the removed
# (old) implementation and the rclone pipeline after it is the added (new)
# replacement; they would upload twice if run as-is. Confirm against the
# original commit.
uploader \
--cloud=aws \
--region={{ .Values.scheduledBackup.s3.region }} \
--bucket={{ .Values.scheduledBackup.s3.bucket }} \
--backup-dir=${backupPath}
# Once we know there are no more credentials that will be logged we can run with -x
# NOTE(review): the comment above defers -x, yet `set -x` follows immediately.
# With env_auth no credentials appear in this script, so tracing is presumably
# safe — verify.
set -x
bucket={{ .Values.scheduledBackup.s3.bucket }}

# Minimal rclone remote named "aws": env_auth = true makes rclone read AWS
# credentials from the environment/instance profile, so none are written here.
cat <<EOF > /tmp/rclone.conf
[aws]
type = s3
provider = AWS
env_auth = true
region = {{ .Values.scheduledBackup.s3.region }}
EOF

cd "${backupPath}"
{{- if .Values.scheduledBackup.s3.prefix }}
# Tar + pigz-compress the backup and stream it to S3 under the key prefix.
tar -cf - "${backup_name}" | pigz -p 16 \
| rclone --config /tmp/rclone.conf rcat s3:${bucket}/{{ .Values.scheduledBackup.s3.prefix }}/${backup_name}/${backup_name}.tgz
{{- else }}
# Same upload without a prefix.
tar -cf - "${backup_name}" | pigz -p 16 \
| rclone --config /tmp/rclone.conf rcat s3:${bucket}/${backup_name}/${backup_name}.tgz
{{- end }}
{{- end }}

{{- if and (.Values.scheduledBackup.cleanupAfterUpload) (or (.Values.scheduledBackup.gcp) (or .Values.scheduledBackup.ceph .Values.scheduledBackup.s3)) }}
Expand Down
2 changes: 2 additions & 0 deletions charts/tidb-cluster/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -741,6 +741,7 @@ scheduledBackup:
# backup to gcp
gcp: {}
# bucket: ""
# prefix: ""
# secretName is the name of the secret which stores the gcp service account credentials json file
# The service account must have read/write permission to the above bucket.
# Read the following document to create the service account and download the credentials file as credentials.json:
Expand All @@ -761,6 +762,7 @@ scheduledBackup:
s3: {}
# region: ""
# bucket: ""
# prefix: ""
# secretName is the name of the secret which stores s3 object store access key and secret key
# You can create the secret by:
# kubectl create secret generic s3-backup-secret --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
Expand Down

0 comments on commit 0301a42

Please sign in to comment.