tidb-backup fails to start

I deployed TiDB on a Kubernetes cluster, and the tidb-backup job fails to start:

[root@worker10 tidb-backup]# kubectl logs tidb-cluster-fullbackup-20191031-m2vjr -n tidb-admin
Error from server (BadRequest): container "backup" in pod "tidb-cluster-fullbackup-20191031-m2vjr" is waiting to start: CreateContainerConfigError
[root@worker10 tidb-backup]# 

[root@worker10 tidb-backup]# kubectl describe pods  tidb-cluster-fullbackup-20191031-m2vjr -n tidb-admin
Name:           tidb-cluster-fullbackup-20191031-m2vjr
Namespace:      tidb-admin
Node:           10.1.52.29/10.1.52.29
Start Time:     Thu, 31 Oct 2019 13:46:06 +0800
Labels:         app.kubernetes.io/component=backup
                app.kubernetes.io/instance=denal-tidb-backup
                app.kubernetes.io/managed-by=Tiller
                app.kubernetes.io/name=tidb-backup
                controller-uid=718a0f2e-fba1-11e9-95ef-18602480f56a
                helm.sh/chart=tidb-backup-v1.0.0
                job-name=tidb-cluster-fullbackup-20191031
Annotations:    <none>
Status:         Pending
IP:             172.20.22.7
Controlled By:  Job/tidb-cluster-fullbackup-20191031
Containers:
  backup:
    Container ID:  
    Image:         pingcap/tidb-cloud-backup:20190610
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/sh
      -c
      set -euo pipefail
      
      host=$(getent hosts tidb-cluster-tidb | head | awk '{print $1}')
      
      dirname=/data/${BACKUP_NAME}
      echo "making dir ${dirname}"
      mkdir -p ${dirname}
      
      password_str=""
      if [ -n "${TIDB_PASSWORD}" ];
      then
          password_str="-p${TIDB_PASSWORD}"
      fi
      
      gc_life_time=`/usr/bin/mysql -h${host} -P4000 -u${TIDB_USER} ${password_str} -Nse "select variable_value from mysql.tidb where variable_name='tikv_gc_life_time';"`
      echo "Old TiKV GC life time is ${gc_life_time}"
      
      echo "Increase TiKV GC life time to 3h"
      /usr/bin/mysql -h${host} -P4000 -u${TIDB_USER} ${password_str} -Nse "update mysql.tidb set variable_value='3h' where variable_name='tikv_gc_life_time';"
      /usr/bin/mysql -h${host} -P4000 -u${TIDB_USER} ${password_str} -Nse "select variable_name,variable_value from mysql.tidb where variable_name='tikv_gc_life_time';"
      
      if [ -n "" ];
      then
          snapshot_args="--tidb-snapshot="
          echo "commitTS = " > ${dirname}/savepoint
          cat ${dirname}/savepoint
      fi
      
      /mydumper 
        --outputdir=${dirname} 
        --host=${host} 
        --port=4000 
        --user=${TIDB_USER} 
        --password=${TIDB_PASSWORD} 
        --long-query-guard=3600 
        --tidb-force-priority=LOW_PRIORITY 
        --verbose=3 ${snapshot_args:-}
      
      echo "Reset TiKV GC life time to ${gc_life_time}"
      /usr/bin/mysql -h${host} -P4000 -u${TIDB_USER} ${password_str} -Nse "update mysql.tidb set variable_value='${gc_life_time}' where variable_name='tikv_gc_life_time';"
      /usr/bin/mysql -h${host} -P4000 -u${TIDB_USER} ${password_str} -Nse "select variable_name,variable_value from mysql.tidb where variable_name='tikv_gc_life_time';"
    State:          Waiting
      Reason:       CreateContainerConfigError
    Ready:          False
    Restart Count:  0
    Environment:
      BACKUP_NAME:    fullbackup-20191031
      TIDB_USER:      <set to the key 'user' in secret 'backup-secret'>      Optional: false
      TIDB_PASSWORD:  <set to the key 'password' in secret 'backup-secret'>  Optional: false
    Mounts:
      /data from data (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-brsjh (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  data:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  fullbackup-20191031
    ReadOnly:   false
  default-token-brsjh:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-brsjh
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     <none>
Events:
  Type    Reason  Age                  From                 Message
  ----    ------  ----                 ----                 -------
  Normal  Pulled  0s (x5608 over 20h)  kubelet, 10.1.52.29  Container image "pingcap/tidb-cloud-backup:20190610" already present on machine
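
CreateContainerConfigError means the kubelet could not assemble the container's configuration before starting it; the most common cause is that a Secret or ConfigMap the pod references does not exist. The backup container above reads TIDB_USER and TIDB_PASSWORD from the secret 'backup-secret', so a quick first check (assuming the tidb-admin namespace from the describe output) is:

kubectl get secret backup-secret -n tidb-admin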

Please post your values.yaml so we can take a look. Does your cluster have a matching PV?
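You can check with something like:

kubectl get pv
kubectl get pvc -n tidb-admin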

# Default values for tidb-backup.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# clusterName is the TiDB cluster name that should backup from or restore to.
clusterName: tidb-cluster
## backup | restore | scheduled-restore
mode: backup
# name is the backup name; the chart default is fullbackup-{{ date "200601021504" .Release.Time }}
name: fullbackup-20191031
image:
  pullPolicy: IfNotPresent
  # https://github.com/pingcap/tidb-cloud-backup
  backup: pingcap/tidb-cloud-backup:20190610

# Add additional labels for backup/restore job's pod
# ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
extraLabels: {}

# secretName is the name of the secret which stores user and password used for backup/restore
# Note: you must give the user enough privilege to do the backup and restore
# you can create the secret by:
# kubectl create secret generic backup-secret --namespace=<namespace> --from-literal=user=root --from-literal=password=<password>
secretName: backup-secret

storage:
  className: local-storage
  size: 100Gi

# backupOptions is the options of mydumper https://github.com/maxbube/mydumper/blob/master/docs/mydumper_usage.rst#options
backupOptions: "--verbose=3"
# Set the tidb_snapshot to be used for the backup
# Use `show master status` to get the ts:
#   MySQL [(none)]> show master status;
#   +-------------+--------------------+--------------+------------------+-------------------+
#   | File        | Position           | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
#   +-------------+--------------------+--------------+------------------+-------------------+
#   | tidb-binlog | 409076965619269635 |              |                  |                   |
#   +-------------+--------------------+--------------+------------------+-------------------+
#   1 row in set (0.01 sec)
# For this example, "409076965619269635" is the initialCommitTs
initialCommitTs: ""
# restoreOptions is the options of loader https://www.pingcap.com/docs-cn/tools/loader/
restoreOptions: "-t 16"

# By default, the backup/restore uses PV to store/load backup data
# You can choose to store/load backup data to/from gcp, ceph or s3 bucket by enabling the following corresponding section:

# backup to or restore from gcp bucket, the backup path is in the form of <clusterName>-<name>
gcp: {}
  # bucket: ""
  # secretName is the name of the secret which stores the gcp service account credentials json file
  # The service account must have read/write permission to the above bucket.
  # Read the following document to create the service account and download the credentials file as credentials.json:
  # https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually
  # And then create the secret by:
  # kubectl create secret generic gcp-backup-secret --namespace=<namespace> --from-file=./credentials.json
  # secretName: gcp-backup-secret

# backup to or restore from ceph bucket, the backup path is in the form of <clusterName>-<name>
ceph: {}
  # endpoint: ""
  # bucket: ""
  # secretName is the name of the secret which stores ceph object store access key and secret key
  # You can create the secret by:
  # kubectl create secret generic ceph-backup-secret --namespace=<namespace> --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
  # secretName: ceph-backup-secret

# backup to or restore from s3 bucket, the backup path is in the form of <clusterName>-<name>
s3: {}
  # region: ""
  # bucket: ""
  # secretName is the name of the secret which stores s3 object store access key and secret key
  # You can create the secret by:
  # kubectl create secret generic s3-backup-secret --namespace=<namespace> --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
  # secretName: s3-backup-secret

The cluster still has two available local-storage PVs.
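
Whether the job's PVC actually claimed one of them can be confirmed with, for example:

kubectl get pvc fullbackup-20191031 -n tidb-admin

A STATUS of Bound means the claim matched one of the two local-storage PVs.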

The PVC binding looks fine. Have you created the backup-secret secret?
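
If not, it can be created in the job's namespace with the command from the values.yaml comment above (substitute your own password):

kubectl create secret generic backup-secret --namespace=tidb-admin --from-literal=user=root --from-literal=password=<password>

Once the secret exists, the kubelet's periodic retries (visible as the repeated Pulled events above, x5608 over 20h) should let the container start without recreating the job.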