Problem deploying tidb-operator with local PVs

Deploying tidb-operator with local PVs:
for i in $(seq 3); do mkdir -p /home/data/pv0$i; done

for i in $(seq 3); do
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tidb-cluster-r34-pv0${i}
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: m4-tikv-storage
  local:
    path: /home/data/pv0${i}
  nodeAffinity:
    required:
      nodeSelectorTerms:
        # pin each PV to the node that actually holds the directory
        # (kubernetes.io/hostname=vm34, as shown by describe pv below)
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - vm34
EOF
done

kubectl get pv |grep -i tidb-cluster
tidb-cluster-r34-pv01 5Gi RWO Recycle Bound tidb-m3/tikv-m3-tikv-0 new-storage 29h
tidb-cluster-r34-pv02 5Gi RWO Recycle Available new-storage 29h
tidb-cluster-r34-pv03 5Gi RWO Recycle Bound tidb-m3/pd-m3-pd-0 new-storage
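
Before applying the TidbCluster manifest it is worth confirming which StorageClass each PV actually references, and that a StorageClass object with that name exists, for example:

kubectl get pv tidb-cluster-r34-pv01 tidb-cluster-r34-pv02 tidb-cluster-r34-pv03 \
  -o custom-columns=NAME:.metadata.name,STORAGECLASS:.spec.storageClassName
kubectl get storageclass new-storage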

tidb-operator.yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: m3
  namespace: tidb-m3
  annotations:
    pingcap.com/ha-topology-key: kubernetes.io/hostname
spec:
  version: "v6.5.0"
  timezone: Asia/Shanghai
  hostNetwork: false
  imagePullPolicy: IfNotPresent

  enableDynamicConfiguration: true
  configUpdateStrategy: RollingUpdate

  pd:
    baseImage: pingcap/pd
    maxFailoverCount: 6
    replicas: 1
    requests:
      cpu: "100m"
      memory: "400Mi"
      storage: 3Gi
    limits:
      cpu: "200m"
      memory: "4Gi"
    mountClusterClientSecret: false
    storageClassName: "new-storage"
    config: |
      lease = 3
      enable-prevote = true
      [dashboard]
        internal-proxy = true

  tidb:
    baseImage: pingcap/tidb
    maxFailoverCount: 6
    replicas: 1
    requests:
      cpu: "100m"
      memory: "400Mi"
    limits:
      cpu: "2000m"
      memory: "4Gi"
    config: |
      split-table = true
      oom-action = "log"
    service:
      externalTrafficPolicy: Cluster
      type: NodePort
      mysqlNodePort: 35082
      statusNodePort: 35082

  tikv:
    baseImage: pingcap/tikv
    replicas: 1
    maxFailoverCount: 6
    config: {}
    requests:
      cpu: "100m"
      memory: "400Mi"
      storage: 4Gi
    limits:
      cpu: "2000m"
      memory: "2Gi"
    mountClusterClientSecret: false
    storageClassName: "new-storage"

  enablePVReclaim: false
  pvReclaimPolicy: Recycle
  tlsCluster: {}

kubectl apply -f tidb-m3.yaml

[root@vm10-2-103-242 m3-label]# kubectl get pod -n tidb-m3
NAME READY STATUS RESTARTS AGE
m3-discovery-6b7b77d77d-ch8g9 1/1 Running 0 29h
m3-pd-0 1/1 Running 0 29h
m3-tikv-0 1/1 Running 0 29h
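
When a component's pods never show up, the TidbCluster object's status and events usually explain why, in addition to the operator log. For example (assuming the operator runs as the tidb-controller-manager deployment in the tidb-admin namespace, the default for a Helm install):

kubectl get tidbcluster m3 -n tidb-m3
kubectl describe tidbcluster m3 -n tidb-m3
kubectl logs -n tidb-admin deploy/tidb-controller-manager --tail=50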

The tidb pod is never created successfully. The tidb-operator log reports the following error:
E0525 13:48:40.134181 1 tidb_cluster_controller.go:143] TidbCluster: tidb-m3/m3, sync failed failed to sync volume status for tikv: failed to get desired volumes: cannot get sc new-storage, requeuing

[root@vm10-2-103-242 m3-label]# kubectl describe pv tidb-cluster-r34-pv01
Name: tidb-cluster-r34-pv01
Labels:
Annotations: pv.kubernetes.io/bound-by-controller: yes
Finalizers: [kubernetes.io/pv-protection]
StorageClass: new-storage
Status: Bound
Claim: tidb-m3/tikv-m3-tikv-0
Reclaim Policy: Recycle
Access Modes: RWO
VolumeMode: Filesystem
Capacity: 5Gi
Node Affinity:
Required Terms:
Term 0: kubernetes.io/hostname in [vm34]
Message:
Source:
Type: LocalVolume (a persistent volume backed by local storage on a node)
Path: /home/data/pv01
Events:
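
The PV's node affinity pins it to the node labeled vm34, so as a separate sanity check it can help to confirm that label really exists on a Ready node (this is independent of the StorageClass error):

kubectl get nodes -l kubernetes.io/hostname=vm34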

What is causing this?

The error indicates that while reconciling the TiDB cluster, TiDB Operator could not sync the volume status for the TiKV component. The message "cannot get sc new-storage, requeuing" points to the root cause: the StorageClass named "new-storage" cannot be found, so the reconcile request is requeued.
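
One way out, assuming local volumes with no dynamic provisioner, is to create a StorageClass with that exact name (and, separately, to make the storageClassName in the PV creation script above, m4-tikv-storage, consistent with it). A minimal sketch:

cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: new-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
EOF

kubectl get storageclass new-storage

Once the StorageClass resolves, the operator will retry on its next reconcile and should then create the tidb pods.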