【TiDB version】:
v4.0.8
【Kubernetes version】:
v1.19.3
【Problem description】:
I deployed a cluster following the example in the Kubernetes deployment docs, https://github.com/pingcap/tidb-operator/blob/master/examples/tiflash/tidb-cluster.yaml, changing only the name to k8s-staging-local-pv-test. After kubectl apply, no pods were started.
【Detailed steps】
Beforehand, the Kubernetes cluster was set up with three nodes in total:
[root@{master-node} ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
{master-node} Ready master 4d15h v1.19.3
{follower-node-1} Ready <none> 4d15h v1.19.3
{follower-node-2} Ready <none> 2d15h v1.19.4
The cluster uses local PVs for persistence (the local-storage StorageClass is sketched after the PV listing below):
[root@{master-node} ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-f9fd979d6-fctc9 1/1 Running 2 4d15h
kube-system coredns-f9fd979d6-tfmnp 1/1 Running 2 4d15h
kube-system etcd-{master-node} 1/1 Running 3 4d15h
kube-system kube-apiserver-{master-node} 1/1 Running 3 4d15h
kube-system kube-controller-manager-{master-node} 1/1 Running 3 4d15h
kube-system kube-flannel-ds-6d2cg 1/1 Running 3 4d15h
kube-system kube-flannel-ds-gvlfj 1/1 Running 0 4d15h
kube-system kube-flannel-ds-lmfmr 1/1 Running 0 2d15h
kube-system kube-proxy-72qkf 1/1 Running 0 4d15h
kube-system kube-proxy-8r2bw 1/1 Running 2 4d15h
kube-system kube-proxy-rhv2x 1/1 Running 0 2d15h
kube-system kube-scheduler-{master-node} 1/1 Running 4 4d15h
kube-system local-volume-provisioner-8mjr6 1/1 Running 0 2d15h
kube-system local-volume-provisioner-lcgpp 1/1 Running 0 4d13h
kube-system tiller-deploy-7b56c8dfb7-d48wk 1/1 Running 0 2d22h
tidb-admin tidb-controller-manager-85ffcb7557-w95mv 1/1 Running 2 2d22h
tidb-admin tidb-scheduler-667657d7d-88srz 2/2 Running 1 2d22h
[root@{master-node} ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
local-pv-949fb3ba 98Gi RWO Delete Available local-storage 4d13h
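For context, the local-storage StorageClass consumed by local-volume-provisioner was created along the lines of the standard definition from the local static provisioner docs (a sketch; my exact manifest may differ slightly):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer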
The TidbCluster configuration follows the example manifest:
[root@{master-node} k8s-staging-local-pv-test]# pwd
/root/k8s-staging-local-pv-test
[root@{master-node} k8s-staging-local-pv-test]# cat tidb-cluster.yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: k8s-staging-local-pv-test
spec:
  configUpdateStrategy: RollingUpdate
  enableDynamicConfiguration: true
  enablePVReclaim: false
  imagePullPolicy: IfNotPresent
  pd:
    baseImage: pingcap/pd
    config:
      log:
        level: info
      replication:
        enable-placement-rules: "true"
        location-labels:
          - zone
          - host
        max-replicas: 3
    imagePullPolicy: IfNotPresent
    maxFailoverCount: 3
    replicas: 3
    requests:
      storage: 10Gi
    storageClassName: local-storage
  pvReclaimPolicy: Retain
  schedulerName: tidb-scheduler
  services:
    - name: pd
      type: ClusterIP
  tidb:
    baseImage: pingcap/tidb
    config:
      log:
        file:
          max-backups: 3
        level: info
    imagePullPolicy: IfNotPresent
    maxFailoverCount: 3
    replicas: 2
    separateSlowLog: true
    service:
      type: NodePort
    slowLogTailer:
      image: busybox:1.26.2
      imagePullPolicy: IfNotPresent
      limits:
        cpu: 100m
        memory: 50Mi
      requests:
        cpu: 20m
        memory: 5Mi
  tiflash:
    baseImage: pingcap/tiflash
    maxFailoverCount: 3
    replicas: 2
    storageClaims:
      - resources:
          requests:
            storage: 10Gi
        storageClassName: local-storage
  tikv:
    baseImage: pingcap/tikv
    config:
      log-level: info
    imagePullPolicy: IfNotPresent
    maxFailoverCount: 3
    replicas: 3
    requests:
      storage: 10Gi
    storageClassName: local-storage
  timezone: UTC
  version: v4.0.8
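As a side note, the manifest can be sanity-checked against the installed CRD schema with a server-side dry run before applying (a sketch, assuming kubectl v1.19's --dry-run=server; output omitted here):

kubectl apply --dry-run=server -f /root/k8s-staging-local-pv-test/tidb-cluster.yaml -n k8s-staging-local-pv-test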
After running kubectl apply, no new pods were created:
[root@{master-node} ~]# kubectl apply -f k8s-staging-local-pv-test -n k8s-staging-local-pv-test
tidbcluster.pingcap.com/k8s-staging-local-pv-test created
[root@{master-node} ~]# kubectl get po -n k8s-staging-local-pv-test -l app.kubernetes.io/instance=k8s-staging-local-pv-test
No resources found in k8s-staging-local-pv-test namespace.
[root@{master-node} ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-f9fd979d6-fctc9 1/1 Running 2 4d15h
kube-system coredns-f9fd979d6-tfmnp 1/1 Running 2 4d15h
kube-system etcd-{master-node} 1/1 Running 3 4d15h
kube-system kube-apiserver-{master-node} 1/1 Running 3 4d15h
kube-system kube-controller-manager-{master-node} 1/1 Running 3 4d15h
kube-system kube-flannel-ds-6d2cg 1/1 Running 3 4d15h
kube-system kube-flannel-ds-gvlfj 1/1 Running 0 4d15h
kube-system kube-flannel-ds-lmfmr 1/1 Running 0 2d15h
kube-system kube-proxy-72qkf 1/1 Running 0 4d15h
kube-system kube-proxy-8r2bw 1/1 Running 2 4d15h
kube-system kube-proxy-rhv2x 1/1 Running 0 2d15h
kube-system kube-scheduler-{master-node} 1/1 Running 4 4d15h
kube-system local-volume-provisioner-8mjr6 1/1 Running 0 2d15h
kube-system local-volume-provisioner-lcgpp 1/1 Running 0 4d13h
kube-system tiller-deploy-7b56c8dfb7-d48wk 1/1 Running 0 2d22h
tidb-admin tidb-controller-manager-85ffcb7557-w95mv 1/1 Running 2 2d22h
tidb-admin tidb-scheduler-667657d7d-88srz 2/2 Running 1 2d22h
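If more information is needed, I can run further checks along these lines (commands only, no output collected yet; the controller pod name is the one listed above):

kubectl get tidbcluster -n k8s-staging-local-pv-test
kubectl describe tidbcluster k8s-staging-local-pv-test -n k8s-staging-local-pv-test
kubectl get events -n k8s-staging-local-pv-test --sort-by=.metadata.creationTimestamp
kubectl logs -n tidb-admin tidb-controller-manager-85ffcb7557-w95mv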