按照官方文档在kind里面部署TiDB集群,Operator已经部署成功,但是在部署TiDB-Cluster集群时,发现缺少TiDB实例的pod。步骤是参考如下官方文档,一些镜像已经提前加载到K8s集群中了:
https://docs.pingcap.com/zh/tidb-in-kubernetes/stable/get-started#第-3-步部署-tidb-集群和监控
[root@centos7 ~]# kubectl get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-565d847f94-cpx9s 1/1 Running 0 39m
kube-system coredns-565d847f94-mjfps 1/1 Running 0 39m
kube-system etcd-kind-control-plane 1/1 Running 0 39m
kube-system kindnet-kjhzx 1/1 Running 0 39m
kube-system kube-apiserver-kind-control-plane 1/1 Running 0 39m
kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 39m
kube-system kube-proxy-f9rdv 1/1 Running 0 39m
kube-system kube-scheduler-kind-control-plane 1/1 Running 0 39m
local-path-storage local-path-provisioner-684f458cdd-rb8jv 1/1 Running 0 39m
tidb-admin tidb-controller-manager-86f4899f-xrxwg 1/1 Running 0 37m
tidb-admin tidb-scheduler-7957d5b4d6-7fj7j 2/2 Running 0 37m
[root@centos7 ~]# docker exec -it adecb14994c2 crictl images
IMAGE TAG IMAGE ID SIZE
docker.io/grafana/grafana 7.5.11 6cfe8ab94353f 206MB
docker.io/kindest/kindnetd v20221004-44d545d1 d6e3e26021b60 25.8MB
docker.io/kindest/local-path-helper v20220607-9a4d8d2a d2f902e939cc3 2.86MB
docker.io/kindest/local-path-provisioner v0.0.22-kind.0 4c1e997385b8f 17.4MB
docker.io/library/alpine 3.16.0 e66264b98777e 5.81MB
docker.io/pingcap/advanced-statefulset v0.4.0 70c265c22e08e 49.4MB
docker.io/pingcap/pd v6.5.0 69c043a19b5d9 165MB
docker.io/pingcap/tidb-backup-manager v1.4.0 fd4dcce8769e5 579MB
docker.io/pingcap/tidb-dashboard v6.5.0 e269b8cd23749 267MB
docker.io/pingcap/tidb-monitor-initializer v6.5.0 dc26054ae594b 6.49MB
docker.io/pingcap/tidb-monitor-reloader v1.0.1 912ff2b5e6562 20.7MB
docker.io/pingcap/tidb-operator v1.4.0 30563eeb9ca04 298MB
docker.io/pingcap/tidb v6.5.0 500953de794e2 200MB
docker.io/pingcap/tikv v6.5.0 9621b51b12826 551MB
docker.io/prom/prometheus v2.27.1 86ea6f86fc575 187MB
k8s.gcr.io/kube-scheduler v1.25.3 6d23ec0e8b87e 51.9MB
quay.io/prometheus-operator/prometheus-config-reloader v0.49.0 ae8e4c9feb781 13.8MB
registry.k8s.io/coredns/coredns v1.9.3 5185b96f0becf 14.8MB
registry.k8s.io/etcd 3.5.4-0 a8a176a5d5d69 102MB
registry.k8s.io/kube-apiserver v1.25.3 4bc1b1e750e34 76.5MB
registry.k8s.io/kube-controller-manager v1.25.3 580dca99efc3b 64.5MB
registry.k8s.io/kube-proxy v1.25.3 86063cd68dfc9 63.3MB
registry.k8s.io/kube-scheduler v1.25.3 5225724a11400 51.9MB
registry.k8s.io/pause 3.7 221177c6082a8 311kB
[root@centos7 ~]# kubectl create namespace tidb-cluster && kubectl -n tidb-cluster apply -f tidb-cluster.yaml
namespace/tidb-cluster created
tidbcluster.pingcap.com/basic created
[root@centos7 ~]#
[root@centos7 ~]# kubectl get po -n tidb-cluster
NAME READY STATUS RESTARTS AGE
basic-discovery-5db6c75657-wrz6l 1/1 Running 0 36s
basic-pd-0 1/1 Running 0 36s
basic-tikv-0 1/1 Running 0 28s
[root@centos7 ~]#
[root@centos7 ~]# kubectl get po -n tidb-cluster
NAME READY STATUS RESTARTS AGE
basic-discovery-5db6c75657-wrz6l 1/1 Running 0 43s
basic-pd-0 1/1 Running 0 43s
basic-tikv-0 1/1 Running 0 35s
[root@centos7 ~]# kubectl get TidbCluster -n tidb-cluster basic
NAME READY PD STORAGE READY DESIRE TIKV STORAGE READY DESIRE TIDB READY DESIRE AGE
basic False pingcap/pd:v6.5.0 1Gi 1 1 1Gi 1 1 1 62s
[root@centos7 ~]#
tidb-cluster.yaml的内容如下:
# IT IS NOT SUITABLE FOR PRODUCTION USE.
# This YAML describes a basic TiDB cluster with minimum resource requirements,
# which should be able to run in any Kubernetes cluster with storage support.
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: basic
spec:
  version: v6.5.0
  timezone: UTC
  pvReclaimPolicy: Retain
  enableDynamicConfiguration: true
  configUpdateStrategy: RollingUpdate
  discovery: {}
  helper:
    image: alpine:3.16.0
  pd:
    baseImage: pingcap/pd
    maxFailoverCount: 0
    replicas: 1
    # if storageClassName is not set, the default Storage Class of the Kubernetes cluster will be used
    # storageClassName: local-storage
    requests:
      storage: "1Gi"
    config: {}
  tikv:
    baseImage: pingcap/tikv
    maxFailoverCount: 0
    # If only 1 TiKV is deployed, the TiKV region leader
    # cannot be transferred during upgrade, so we have
    # to configure a short timeout
    evictLeaderTimeout: 1m
    replicas: 1
    # if storageClassName is not set, the default Storage Class of the Kubernetes cluster will be used
    # storageClassName: local-storage
    requests:
      storage: "1Gi"
    config:
      storage:
        # In basic examples, we set this to avoid using too much storage.
        reserve-space: "0MB"
      rocksdb:
        # In basic examples, we set this to avoid the following error in some Kubernetes clusters:
        # "the maximum number of open file descriptors is too small, got 1024, expect greater or equal to 82920"
        max-open-files: 256
      raftdb:
        max-open-files: 256
  tidb:
    baseImage: pingcap/tidb
    maxFailoverCount: 0
    replicas: 1
    service:
      type: ClusterIP
    config: {}
部署完 TiDB 集群后,只看到 PD 和 TiKV 的 Pod,一直没有出现 TiDB 实例的 Pod,这个问题应该怎么排查?
[root@centos7 ~]# kubectl get po -n tidb-cluster
NAME READY STATUS RESTARTS AGE
basic-discovery-5db6c75657-wrz6l 1/1 Running 0 43s
basic-pd-0 1/1 Running 0 43s
basic-tikv-0 1/1 Running 0 35s
[root@centos7 ~]# kubectl get TidbCluster -n tidb-cluster basic
NAME READY PD STORAGE READY DESIRE TIKV STORAGE READY DESIRE TIDB READY DESIRE AGE
basic False pingcap/pd:v6.5.0 1Gi 1 1 1Gi 1 1 1 62s