【现象】进行TiKV缩容后(4->3),Pod被删除,使用如下命令查看时status.replicas正常(数值为3),
kubectl get tidbcluster ${cluster_name} -n ${namespace} -oyaml
但是在stores中,被删除的store进入Tombstone状态(TiDB集群的名字和Namespace都是k8s-staging-local-pv)
tombstoneStores:
"1":
id: "1"
ip: k8s-staging-local-pv-tikv-3.k8s-staging-local-pv-tikv-peer.k8s-staging-local-pv.svc
lastHeartbeatTime: "2020-11-30T08:20:45Z"
lastTransitionTime: null
leaderCount: 0
podName: k8s-staging-local-pv-tikv-3
state: Tombstone
文档中显示Tombstone状态是TiKV异常下线https://docs.pingcap.com/zh/tidb-in-kubernetes/stable/exceptions#tikv-store-异常进入-tombstone-状态
另外,由于之前已经进行过一次手动缩容,在tombstoneStores下有两个store(另一个store所属的集群已经被销毁,销毁后删除了pvc,pv也被重建)
【问题】Tombstone状态到底是不是异常状态?如果不是,如何消除这个状态,将这段信息删除(之前已经尝试过删除对应的PVC并重建PV,无效)
"kubectl get tidbcluster ${cluster_name} -n ${namespace} -oyaml"的完整输出
[root@node-101135-kok8tawr k8s-staging-local-pv]# kubectl get tidbcluster k8s-staging-local-pv -n k8s-staging-local-pv -oyaml
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"pingcap.com/v1alpha1","kind":"TidbCluster","metadata":{"annotations":{},"name":"k8s-staging-local-pv","namespace":"k8s-staging-local-pv"},"spec":{"configUpdateStrategy":"RollingUpdate","enableDynamicConfiguration":true,"enablePVReclaim":false,"hostNetwork":true,"imagePullPolicy":"IfNotPresent","pd":{"baseImage":"pingcap/pd","config":{},"mountClusterClientSecret":false,"replicas":1,"requests":{"cpu":"2000m","memory":"8Gi","storage":"10Gi"},"storageClassName":"local-storage"},"pvReclaimPolicy":"Retain","tidb":{"baseImage":"pingcap/tidb","config":{},"replicas":1,"requests":{"cpu":"2000m","memory":"8Gi","storage":"10Gi"},"service":{"externalTrafficPolicy":"Cluster","type":"NodePort"}},"tikv":{"baseImage":"pingcap/tikv","config":{},"mountClusterClientSecret":false,"replicas":4,"requests":{"cpu":"2000m","memory":"8Gi","storage":"50Gi"},"storageClassName":"local-storage"},"timezone":"UTC","tlsCluster":{},"version":"v4.0.8"}}
creationTimestamp: "2020-11-30T08:05:41Z"
generation: 70
managedFields:
- apiVersion: pingcap.com/v1alpha1
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
.: {}
f:kubectl.kubernetes.io/last-applied-configuration: {}
f:spec:
.: {}
f:configUpdateStrategy: {}
f:enableDynamicConfiguration: {}
f:enablePVReclaim: {}
f:hostNetwork: {}
f:imagePullPolicy: {}
f:pd:
.: {}
f:baseImage: {}
f:mountClusterClientSecret: {}
f:replicas: {}
f:requests:
.: {}
f:memory: {}
f:storage: {}
f:storageClassName: {}
f:pvReclaimPolicy: {}
f:tidb:
.: {}
f:baseImage: {}
f:replicas: {}
f:requests:
.: {}
f:memory: {}
f:storage: {}
f:service:
.: {}
f:externalTrafficPolicy: {}
f:type: {}
f:tikv:
.: {}
f:baseImage: {}
f:mountClusterClientSecret: {}
f:requests:
.: {}
f:memory: {}
f:storage: {}
f:storageClassName: {}
f:timezone: {}
f:tlsCluster: {}
f:version: {}
manager: kubectl-client-side-apply
operation: Update
time: "2020-11-30T08:05:41Z"
- apiVersion: pingcap.com/v1alpha1
fieldsType: FieldsV1
fieldsV1:
f:spec:
f:tikv:
f:replicas: {}
manager: kubectl-edit
operation: Update
time: "2020-11-30T08:20:46Z"
- apiVersion: pingcap.com/v1alpha1
fieldsType: FieldsV1
fieldsV1:
f:spec:
f:discovery: {}
f:pd:
f:config: {}
f:maxFailoverCount: {}
f:requests:
f:cpu: {}
f:tidb:
f:config: {}
f:maxFailoverCount: {}
f:requests:
f:cpu: {}
f:tikv:
f:config: {}
f:maxFailoverCount: {}
f:requests:
f:cpu: {}
f:status:
.: {}
f:auto-scaler: {}
f:clusterID: {}
f:conditions: {}
f:pd:
.: {}
f:image: {}
f:leader:
.: {}
f:clientURL: {}
f:health: {}
f:id: {}
f:lastTransitionTime: {}
f:name: {}
f:members:
.: {}
f:k8s-staging-local-pv-pd-0:
.: {}
f:clientURL: {}
f:health: {}
f:id: {}
f:lastTransitionTime: {}
f:name: {}
f:phase: {}
f:statefulSet:
.: {}
f:collisionCount: {}
f:currentReplicas: {}
f:currentRevision: {}
f:observedGeneration: {}
f:readyReplicas: {}
f:replicas: {}
f:updateRevision: {}
f:updatedReplicas: {}
f:synced: {}
f:pump: {}
f:ticdc: {}
f:tidb:
.: {}
f:image: {}
f:members:
.: {}
f:k8s-staging-local-pv-tidb-0:
.: {}
f:health: {}
f:lastTransitionTime: {}
f:name: {}
f:node: {}
f:phase: {}
f:statefulSet:
.: {}
f:collisionCount: {}
f:currentReplicas: {}
f:currentRevision: {}
f:observedGeneration: {}
f:readyReplicas: {}
f:replicas: {}
f:updateRevision: {}
f:updatedReplicas: {}
f:tiflash: {}
f:tikv:
.: {}
f:image: {}
f:phase: {}
f:statefulSet:
.: {}
f:collisionCount: {}
f:currentReplicas: {}
f:currentRevision: {}
f:observedGeneration: {}
f:readyReplicas: {}
f:replicas: {}
f:updateRevision: {}
f:updatedReplicas: {}
f:stores:
.: {}
f:4:
.: {}
f:id: {}
f:ip: {}
f:lastHeartbeatTime: {}
f:lastTransitionTime: {}
f:leaderCount: {}
f:podName: {}
f:state: {}
f:5:
.: {}
f:id: {}
f:ip: {}
f:lastHeartbeatTime: {}
f:lastTransitionTime: {}
f:leaderCount: {}
f:podName: {}
f:state: {}
f:1001:
.: {}
f:id: {}
f:ip: {}
f:lastHeartbeatTime: {}
f:lastTransitionTime: {}
f:leaderCount: {}
f:podName: {}
f:state: {}
f:synced: {}
f:tombstoneStores:
.: {}
f:1:
.: {}
f:id: {}
f:ip: {}
f:lastHeartbeatTime: {}
f:lastTransitionTime: {}
f:leaderCount: {}
f:podName: {}
f:state: {}
f:6:
.: {}
f:id: {}
f:ip: {}
f:lastHeartbeatTime: {}
f:lastTransitionTime: {}
f:leaderCount: {}
f:podName: {}
f:state: {}
manager: tidb-controller-manager
operation: Update
time: "2020-11-30T08:21:33Z"
name: k8s-staging-local-pv
namespace: k8s-staging-local-pv
resourceVersion: "4236361"
selfLink: /apis/pingcap.com/v1alpha1/namespaces/k8s-staging-local-pv/tidbclusters/k8s-staging-local-pv
uid: ccd1d6b7-2551-4a8c-9959-676ad8b76793
spec:
configUpdateStrategy: RollingUpdate
discovery: {}
enableDynamicConfiguration: true
enablePVReclaim: false
hostNetwork: true
imagePullPolicy: IfNotPresent
pd:
baseImage: pingcap/pd
config: ""
maxFailoverCount: 3
mountClusterClientSecret: false
replicas: 1
requests:
cpu: "2"
memory: 8Gi
storage: 10Gi
storageClassName: local-storage
pvReclaimPolicy: Retain
tidb:
baseImage: pingcap/tidb
config: |
[log]
[log.file]
max-backups = 3
maxFailoverCount: 3
replicas: 1
requests:
cpu: "2"
memory: 8Gi
storage: 10Gi
service:
externalTrafficPolicy: Cluster
type: NodePort
tikv:
baseImage: pingcap/tikv
config: ""
maxFailoverCount: 3
mountClusterClientSecret: false
replicas: 3
requests:
cpu: "2"
memory: 8Gi
storage: 50Gi
storageClassName: local-storage
timezone: UTC
tlsCluster: {}
version: v4.0.8
status:
auto-scaler: null
clusterID: "6900815176203751241"
conditions:
- lastTransitionTime: "2020-11-30T08:20:54Z"
lastUpdateTime: "2020-11-30T08:20:54Z"
message: TiDB cluster is fully up and running
reason: Ready
status: "True"
type: Ready
pd:
image: pingcap/pd:v4.0.8
leader:
clientURL: http://k8s-staging-local-pv-pd-0.k8s-staging-local-pv-pd-peer.k8s-staging-local-pv.svc:2379
health: true
id: "14632557995082257432"
lastTransitionTime: "2020-11-30T08:05:53Z"
name: k8s-staging-local-pv-pd-0
members:
k8s-staging-local-pv-pd-0:
clientURL: http://k8s-staging-local-pv-pd-0.k8s-staging-local-pv-pd-peer.k8s-staging-local-pv.svc:2379
health: true
id: "14632557995082257432"
lastTransitionTime: "2020-11-30T08:05:53Z"
name: k8s-staging-local-pv-pd-0
phase: Normal
statefulSet:
collisionCount: 0
currentReplicas: 1
currentRevision: k8s-staging-local-pv-pd-95584cf7b
observedGeneration: 1
readyReplicas: 1
replicas: 1
updateRevision: k8s-staging-local-pv-pd-95584cf7b
updatedReplicas: 1
synced: true
pump: {}
ticdc: {}
tidb:
image: pingcap/tidb:v4.0.8
members:
k8s-staging-local-pv-tidb-0:
health: true
lastTransitionTime: "2020-11-30T08:11:04Z"
name: k8s-staging-local-pv-tidb-0
node: node-101214-zfocgndy.kscn
phase: Normal
statefulSet:
collisionCount: 0
currentReplicas: 1
currentRevision: k8s-staging-local-pv-tidb-67b796b795
observedGeneration: 1
readyReplicas: 1
replicas: 1
updateRevision: k8s-staging-local-pv-tidb-67b796b795
updatedReplicas: 1
tiflash: {}
tikv:
image: pingcap/tikv:v4.0.8
phase: Normal
statefulSet:
collisionCount: 0
currentReplicas: 3
currentRevision: k8s-staging-local-pv-tikv-cbf98b5b4
observedGeneration: 2
readyReplicas: 3
replicas: 3
updateRevision: k8s-staging-local-pv-tikv-cbf98b5b4
updatedReplicas: 3
stores:
"4":
id: "4"
ip: k8s-staging-local-pv-tikv-0.k8s-staging-local-pv-tikv-peer.k8s-staging-local-pv.svc
lastHeartbeatTime: "2020-11-30T08:21:25Z"
lastTransitionTime: "2020-11-30T08:11:00Z"
leaderCount: 10
podName: k8s-staging-local-pv-tikv-0
state: Up
"5":
id: "5"
ip: k8s-staging-local-pv-tikv-1.k8s-staging-local-pv-tikv-peer.k8s-staging-local-pv.svc
lastHeartbeatTime: "2020-11-30T08:21:25Z"
lastTransitionTime: "2020-11-30T08:11:00Z"
leaderCount: 10
podName: k8s-staging-local-pv-tikv-1
state: Up
"1001":
id: "1001"
ip: k8s-staging-local-pv-tikv-2.k8s-staging-local-pv-tikv-peer.k8s-staging-local-pv.svc
lastHeartbeatTime: "2020-11-30T08:21:25Z"
lastTransitionTime: "2020-11-30T08:11:00Z"
leaderCount: 1
podName: k8s-staging-local-pv-tikv-2
state: Up
synced: true
tombstoneStores:
"1":
id: "1"
ip: k8s-staging-local-pv-tikv-3.k8s-staging-local-pv-tikv-peer.k8s-staging-local-pv.svc
lastHeartbeatTime: "2020-11-30T08:20:45Z"
lastTransitionTime: null
leaderCount: 0
podName: k8s-staging-local-pv-tikv-3
state: Tombstone
"6":
id: "6"
ip: k8s-staging-local-pv-tikv-3.k8s-staging-local-pv-tikv-peer.k8s-staging-local-pv.svc
lastHeartbeatTime: "2020-11-30T07:51:25Z"
lastTransitionTime: null
leaderCount: 0
podName: k8s-staging-local-pv-tikv-3
state: Tombstone