【SOP Series 15】How to Deploy TiDB-Operator on Kubernetes (Part 2)

2. Deploy TiDB Operator

2.1 Create the TiDB Operator CRDs

2.1.1 Download the TiDB Cluster CRD manifest

[root@r21 soft]# wget https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.7/manifests/crd.yaml

2.1.2 Create the TiDB Cluster CRDs

[root@r21 soft]# kubectl apply -f crd.yaml
customresourcedefinition.apiextensions.k8s.io/tidbclusters.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/backups.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/restores.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/backupschedules.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbmonitors.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbinitializers.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbclusterautoscalers.pingcap.com created

2.1.3 Check the CRD status

[root@r21 soft]# kubectl get crd
NAME                                 CREATED AT
backups.pingcap.com                  2020-12-25T10:39:10Z
backupschedules.pingcap.com          2020-12-25T10:39:11Z
restores.pingcap.com                 2020-12-25T10:39:11Z
tidbclusterautoscalers.pingcap.com   2020-12-25T10:39:11Z
tidbclusters.pingcap.com             2020-12-25T10:39:10Z
tidbinitializers.pingcap.com         2020-12-25T10:39:11Z
tidbmonitors.pingcap.com             2020-12-25T10:39:11Z

2.2 Create TiDB Operator

2.2.1 Download the TiDB Operator docker images

docker pull pingcap/tidb-operator:v1.1.7
docker pull pingcap/tidb-backup-manager:v1.1.7
docker pull pingcap/advanced-statefulset:v0.3.3


mkdir -p /opt/soft/docker-image
docker save -o tidb-backup-manager.tar pingcap/tidb-backup-manager
docker save -o tidb-operator.tar pingcap/tidb-operator
docker save -o advanced-statefulset.tar pingcap/advanced-statefulset
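
The saved tar files still need to be loaded on every k8s node that may run the operator pods. A minimal sketch, assuming the worker nodes are 192.168.10.22 and 192.168.10.23 and that root SSH access is available (hosts and paths are illustrative):

## distribute and load the operator images on each node; adjust the host list to your environment
cd /opt/soft/docker-image
for node in 192.168.10.22 192.168.10.23; do
  ssh root@${node} "mkdir -p /opt/soft/docker-image"
  scp tidb-operator.tar tidb-backup-manager.tar advanced-statefulset.tar root@${node}:/opt/soft/docker-image/
  ssh root@${node} "cd /opt/soft/docker-image && \
    docker load -i tidb-operator.tar && \
    docker load -i tidb-backup-manager.tar && \
    docker load -i advanced-statefulset.tar"
done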

2.2.2 Create the tidb-operator deployment file

## use the following deployment yaml file to install tidb-operator
cat tidb-operator-deploy.yaml

-----------------------------------------------------------------------------------
# Source: tidb-operator/templates/scheduler-policy-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: tidb-scheduler-policy
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
data:
  policy.cfg: |-
    {
      "kind" : "Policy",
      "apiVersion" : "v1",
      "predicates": [
        {"name": "NoVolumeZoneConflict"},
        {"name": "MaxEBSVolumeCount"},
        {"name": "MaxAzureDiskVolumeCount"},
        {"name": "NoDiskConflict"},
        {"name": "GeneralPredicates"},
        {"name": "PodToleratesNodeTaints"},
        {"name": "CheckVolumeBinding"},
        {"name": "MaxGCEPDVolumeCount"},
        {"name": "MatchInterPodAffinity"},
        {"name": "CheckVolumeBinding"}
      ],
      "priorities": [
        {"name": "SelectorSpreadPriority", "weight": 1},
        {"name": "InterPodAffinityPriority", "weight": 1},
        {"name": "LeastRequestedPriority", "weight": 1},
        {"name": "BalancedResourceAllocation", "weight": 1},
        {"name": "NodePreferAvoidPodsPriority", "weight": 1},
        {"name": "NodeAffinityPriority", "weight": 1},
        {"name": "TaintTolerationPriority", "weight": 1}
      ],
      "extenders": [
        {
          "urlPrefix": "http://127.0.0.1:10262/scheduler",
          "filterVerb": "filter",
          "preemptVerb": "preempt",
          "weight": 1,
          "httpTimeout": 30000000000,
          "enableHttps": false
        }
      ]
    }


---
# Source: tidb-operator/templates/controller-manager-rbac.yaml

kind: ServiceAccount
apiVersion: v1
metadata:
  name: tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
rules:
- apiGroups: [""]
  resources:
  - services
  - events
  verbs: ["*"]
- apiGroups: [""]
  resources: ["endpoints","configmaps"]
  verbs: ["create", "get", "list", "watch", "update","delete"]
- apiGroups: [""]
  resources: ["serviceaccounts"]
  verbs: ["create","get","update","delete"]
- apiGroups: ["batch"]
  resources: ["jobs"]
  verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create", "update", "get", "list", "watch","delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch","update", "delete"]
- apiGroups: ["apps"]
  resources: ["statefulsets","deployments", "controllerrevisions"]
  verbs: ["*"]
- apiGroups: ["extensions"]
  resources: ["ingresses"]
  verbs: ["*"]
- apiGroups: ["apps.pingcap.com"]
  resources: ["statefulsets", "statefulsets/status"]
  verbs: ["*"]
- apiGroups: ["pingcap.com"]
  resources: ["*"]
  verbs: ["*"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "patch","update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]

- apiGroups: ["rbac.authorization.k8s.io"]
  resources: [clusterroles,roles]
  verbs: ["escalate","create","get","update", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
  resources: ["rolebindings","clusterrolebindings"]
  verbs: ["create","get","update", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-controller-manager
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: tidb-operator:tidb-controller-manager
  apiGroup: rbac.authorization.k8s.io

---
# Source: tidb-operator/templates/scheduler-rbac.yaml

kind: ServiceAccount
apiVersion: v1
metadata:
  name: tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
rules:
# ConfigMap permission for --policy-configmap
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list"]
- apiGroups: ["pingcap.com"]
  resources: ["tidbclusters"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "update"]
# Extra permissions for endpoints other than kube-scheduler
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["delete", "get", "patch", "update"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  resourceNames: ["tidb-scheduler"]
  verbs: ["get", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-scheduler
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: tidb-operator:tidb-scheduler
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:kube-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-scheduler
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: system:kube-scheduler
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:volume-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-scheduler
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: system:volume-scheduler
  apiGroup: rbac.authorization.k8s.io

---
# Source: tidb-operator/templates/controller-manager-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: tidb-operator
      app.kubernetes.io/instance: tidb-operator
      app.kubernetes.io/component: controller-manager
  template:
    metadata:
      labels:
        app.kubernetes.io/name: tidb-operator
        app.kubernetes.io/instance: tidb-operator
        app.kubernetes.io/component: controller-manager
    spec:
      serviceAccount: tidb-controller-manager
      containers:
      - name: tidb-operator
        image: pingcap/tidb-operator:v1.1.7
        imagePullPolicy: IfNotPresent
        resources:
            requests:
              cpu: 80m
              memory: 50Mi

        command:
          - /usr/local/bin/tidb-controller-manager
          - -tidb-backup-manager-image=pingcap/tidb-backup-manager:v1.1.7
          - -tidb-discovery-image=pingcap/tidb-operator:v1.1.7
          - -cluster-scoped=true
          - -auto-failover=true
          - -pd-failover-period=5m
          - -tikv-failover-period=5m
          - -tiflash-failover-period=5m
          - -tidb-failover-period=5m
          - -v=2
        env:
          - name: NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
          - name: TZ
            value: UTC


---
# Source: tidb-operator/templates/scheduler-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: tidb-operator
      app.kubernetes.io/instance: tidb-operator
      app.kubernetes.io/component: scheduler
  template:
    metadata:
      labels:
        app.kubernetes.io/name: tidb-operator
        app.kubernetes.io/instance: tidb-operator
        app.kubernetes.io/component: scheduler
    spec:
      serviceAccount: tidb-scheduler
      containers:
      - name: tidb-scheduler
        image: pingcap/tidb-operator:v1.1.7
        imagePullPolicy: IfNotPresent
        resources:
            limits:
              cpu: 250m
              memory: 150Mi
            requests:
              cpu: 80m
              memory: 50Mi

        command:
          - /usr/local/bin/tidb-scheduler
          - -v=2
          - -port=10262
      - name: kube-scheduler
        image: k8s.gcr.io/kube-scheduler:v1.14.0
        imagePullPolicy: IfNotPresent
        resources:
            limits:
              cpu: 250m
              memory: 150Mi
            requests:
              cpu: 80m
              memory: 50Mi

        command:
        - kube-scheduler
        - --port=10261
        - --leader-elect=true
        - --lock-object-name=tidb-scheduler
        - --lock-object-namespace=tidb-admin
        - --scheduler-name=tidb-scheduler
        - --v=2
        - --policy-configmap=tidb-scheduler-policy
        - --policy-configmap-namespace=tidb-admin

2.2.3 Create tidb-operator

## create tidb-admin namespace
[root@r21 soft]# kubectl create namespace tidb-admin
## create tidb-operator 
[root@r21 soft]# kubectl apply -f tidb-operator-deploy.yaml -n tidb-admin

2.2.4 Check the TiDB-Operator status

[root@r21 soft]# kubectl get pods -n tidb-admin
NAME                                       READY   STATUS    RESTARTS   AGE
tidb-controller-manager-5dc78549b7-z89hg   1/1     Running   0          47m
tidb-scheduler-5cfd58d564-977vk            2/2     Running   0          47m
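
If a pod does not reach Running, the operator logs usually explain why. A quick check, assuming the tidb-admin namespace used above:

## controller-manager log (single container)
kubectl -n tidb-admin logs deployment/tidb-controller-manager
## the scheduler pod has two containers, so name the one to inspect
kubectl -n tidb-admin logs deployment/tidb-scheduler -c tidb-scheduler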

2.2.5 What else can we do

In the steps above we created TiDB-Operator from plain yaml files; alternatively, we can install TiDB-Operator with helm, as the official documentation does.

So why did I use a yaml file to create TiDB-Operator? Simply because it is simple: there is no helm environment to set up, and a single kubectl apply is enough to deploy TiDB-Operator.

Why, then, does the official documentation recommend installing with helm? Other articles already cover helm in detail; for me, its two main advantages are:

  • helm makes version management easy
  • helm supports rollback (see the sketch after the helm output below)

A helm-based installation looks like this:
[root@r21 soft]# wget http://charts.pingcap.org/tidb-operator-v1.1.7.tgz
[root@r21 soft]# tar vxzf tidb-operator-v1.1.7.tgz
[root@r21 soft]# helm install ./tidb-operator --name=tidb-operator --namespace=tidb-admin
NAME:   tidb-operator
LAST DEPLOYED: Wed Nov 25 22:21:51 2020
NAMESPACE: tidb-admin
STATUS: DEPLOYED

RESOURCES:
==> v1/ConfigMap
NAME                   AGE
tidb-scheduler-policy  0s

==> v1/Deployment
NAME                     AGE
tidb-controller-manager  0s
tidb-scheduler           0s

==> v1/Pod(related)
NAME                                      AGE
tidb-controller-manager-5dc78549b7-n49qt  0s
tidb-scheduler-6f78c4d78c-5knp2           0s

==> v1/ServiceAccount
NAME                     AGE
tidb-controller-manager  0s
tidb-scheduler           0s

==> v1beta1/ClusterRole
NAME                                   AGE
tidb-operator:tidb-controller-manager  0s
tidb-operator:tidb-scheduler           0s

==> v1beta1/ClusterRoleBinding
NAME                                   AGE
tidb-operator:kube-scheduler           0s
tidb-operator:tidb-controller-manager  0s
tidb-operator:tidb-scheduler           0s
tidb-operator:volume-scheduler         0s


NOTES:
Make sure tidb-operator components are running:

    kubectl get pods --namespace tidb-admin -l app.kubernetes.io/instance=tidb-operator
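
To illustrate the two advantages listed above, this is roughly what version management and rollback look like with helm 2 (release name tidb-operator as installed above; the revision number is illustrative):

## list the revisions recorded for the release
helm history tidb-operator
## upgrade the release in place, e.g. after downloading a newer chart into ./tidb-operator
helm upgrade tidb-operator ./tidb-operator
## roll back to a previous revision if the upgrade misbehaves
helm rollback tidb-operator 1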

2.3 Create PVs for the TiDB Cluster

2.3.1 Create the PVs

The following commands create the directories on each k8s node that simulate local disks (a RAM disk can be layered on top, as sketched below).

## create 9 local disk directories on each k8s worker node
[root@r22 disks]# for i in `seq 9`; do mkdir -p /mnt/disks/pv0$i; done
[root@r23 disks]# for i in `seq 9`; do mkdir -p /mnt/disks/pv0$i; done
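
The directories above are plain directories. If you really want them RAM-backed, as mentioned earlier, one option is a tmpfs mount per directory; this is only a sketch and the size is illustrative. (The provisioner in 2.3.3 discovers mount points under /mnt/disks, so mounting each directory also matters there.)

## run on each k8s worker node; back every pv directory with a tmpfs
for i in `seq 9`; do mount -t tmpfs -o size=2G tmpfs /mnt/disks/pv0$i; done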

Use the following commands to create 9 PVs for the TiDB Cluster on each node. How many PVs to create depends on your needs, but create at least 9 so that you do not run short.

[root@r22 disks]# for i in `seq 9`; do
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tidb-cluster-r22-pv0${i}
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /mnt/disks/pv0${i}
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - 192.168.10.22
EOF
done


[root@r22 disks]# for i in `seq 9`; do
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tidb-cluster-r23-pv0${i}
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /mnt/disks/pv0${i}
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - 192.168.10.23
EOF
done

2.3.2 Check the PV status

[root@r21 soft]# kubectl get pv
NAME                    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS    REASON   AGE
tidb-cluster-r22-pv01   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv02   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv03   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv04   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv05   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv06   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv07   5Gi        RWO            Delete           Available           local-storage            41s
tidb-cluster-r22-pv08   5Gi        RWO            Delete           Available           local-storage            41s
tidb-cluster-r22-pv09   5Gi        RWO            Delete           Available           local-storage            41s
tidb-cluster-r23-pv01   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv02   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv03   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv04   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv05   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv06   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv07   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv08   5Gi        RWO            Delete           Available           local-storage            32s
tidb-cluster-r23-pv09   5Gi        RWO            Delete           Available           local-storage            32s

2.3.3 What else can we do

We can also use the local-volume-provisioner scheme from the official documentation to create PVs automatically.

Why did I not use that approach? Before creating the PVs, I asked myself whether I actually needed an automated provisioner.

Whether to automate depends on your requirements. What I am building is a test environment for functional testing only, with no chance of ever needing to scale out the PVs.

Likewise, for small clusters I consider manually created PVs a perfectly good choice. For reference, the provisioner manifest and how to apply it are shown below.

# exec in master [k8s-master: r21]
[root@r21 soft]# wget https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.7/manifests/local-dind/local-volume-provisioner.yaml

[root@r21 soft]# cat local-volume-provisioner.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: "local-storage"
provisioner: "kubernetes.io/no-provisioner"
volumeBindingMode: "WaitForFirstConsumer"

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-provisioner-config
  namespace: kube-system
data:
  setPVOwnerRef: "true"
  nodeLabelsForPV: |
    - kubernetes.io/hostname
  storageClassMap: |
    local-storage:
      hostDir: /mnt/disks
      mountDir: /mnt/disks

---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: local-volume-provisioner
  namespace: kube-system
  labels:
    app: local-volume-provisioner
spec:
  selector:
    matchLabels:
      app: local-volume-provisioner
  template:
    metadata:
      labels:
        app: local-volume-provisioner
    spec:
      serviceAccountName: local-storage-admin
      containers:
        - image: "quay.io/external_storage/local-volume-provisioner:v2.3.4"
          #command: [ "/bin/bash", "-ce", "tail -f /dev/null" ]
          name: provisioner
          securityContext:
            privileged: true

          env:
          - name: MY_NODE_NAME
            valueFrom:
              fieldRef:
                fieldPath: spec.nodeName
          - name: MY_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
          - name: JOB_CONTAINER_IMAGE
            value: "quay.io/external_storage/local-volume-provisioner:v2.3.4"
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
            limits:
              cpu: 100m
              memory: 100Mi
          volumeMounts:
            - mountPath: /etc/provisioner/config
              name: provisioner-config
              readOnly: true
            # mounting /dev in DinD environment would fail
            # - mountPath: /dev
            #   name: provisioner-dev
            - mountPath: /mnt/disks
              name: local-disks
              mountPropagation: "HostToContainer"
      volumes:
        - name: provisioner-config
          configMap:
            name: local-provisioner-config
        # - name: provisioner-dev
        #   hostPath:
        #     path: /dev
        - name: local-disks
          hostPath:
            path: /mnt/disks

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-storage-admin
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-pv-binding
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: local-storage-admin
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: system:persistent-volume-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-storage-provisioner-node-clusterrole
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-node-binding
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: local-storage-admin
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: local-storage-provisioner-node-clusterrole
  apiGroup: rbac.authorization.k8s.io


[root@r21 soft]# kubectl apply -f local-volume-provisioner.yaml
storageclass.storage.k8s.io/local-storage created
configmap/local-provisioner-config created
daemonset.apps/local-volume-provisioner created
serviceaccount/local-storage-admin created
clusterrolebinding.rbac.authorization.k8s.io/local-storage-provisioner-pv-binding created
clusterrole.rbac.authorization.k8s.io/local-storage-provisioner-node-clusterrole created
clusterrolebinding.rbac.authorization.k8s.io/local-storage-provisioner-node-binding created
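
If you go the provisioner route, you can verify that the DaemonSet is running and that it has turned the mount points into PVs (output omitted here):

## one provisioner pod per node
kubectl -n kube-system get pods -l app=local-volume-provisioner
## the storage class and the automatically created PVs
kubectl get storageclass local-storage
kubectl get pv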

3. Deploy and Test the TiDB Cluster

3.1 Deploy the TiDB Cluster

3.1.1 Download the TiDB cluster docker images

docker pull pingcap/pd:v4.0.8
docker pull pingcap/tikv:v4.0.8
docker pull pingcap/tidb:v4.0.8
docker pull pingcap/tidb-binlog:v4.0.8
docker pull pingcap/ticdc:v4.0.8
docker pull pingcap/tiflash:v4.0.8
docker pull pingcap/tidb-monitor-reloader:v1.0.1
docker pull pingcap/tidb-monitor-initializer:v4.0.8
docker pull grafana/grafana:6.0.1
docker pull prom/prometheus:v2.18.1
docker pull busybox:1.26.2

docker save -o pd-v4.0.8.tar pingcap/pd:v4.0.8
docker save -o tikv-v4.0.8.tar pingcap/tikv:v4.0.8
docker save -o tidb-v4.0.8.tar pingcap/tidb:v4.0.8
docker save -o tidb-binlog-v4.0.8.tar pingcap/tidb-binlog:v4.0.8
docker save -o ticdc-v4.0.8.tar pingcap/ticdc:v4.0.8
docker save -o tiflash-v4.0.8.tar pingcap/tiflash:v4.0.8
docker save -o tidb-monitor-reloader-v1.0.1.tar pingcap/tidb-monitor-reloader:v1.0.1
docker save -o tidb-monitor-initializer-v4.0.8.tar pingcap/tidb-monitor-initializer:v4.0.8
docker save -o grafana-6.0.1.tar grafana/grafana:6.0.1
docker save -o prometheus-v2.18.1.tar prom/prometheus:v2.18.1
docker save -o busybox-1.26.2.tar busybox:1.26.2


docker load -i pd-v4.0.8.tar
docker load -i tikv-v4.0.8.tar
docker load -i tidb-v4.0.8.tar
docker load -i tidb-binlog-v4.0.8.tar
docker load -i ticdc-v4.0.8.tar
docker load -i tiflash-v4.0.8.tar
docker load -i tidb-monitor-reloader-v1.0.1.tar
docker load -i tidb-monitor-initializer-v4.0.8.tar
docker load -i grafana-6.0.1.tar
docker load -i prometheus-v2.18.1.tar
docker load -i busybox-1.26.2.tar

3.1.2 Download the TiDB deployment yaml file

## Download the TiDB deployment yaml file from the following link
[root@r21 soft]# wget https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.7/examples/advanced/tidb-cluster.yaml

## copy the downloaded tidb-cluster.yaml to tidb-cluster-sample.yaml and modify it as follows
[root@r21 soft]# cat tidb-cluster-sample.yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: mycluster                    
  namespace: mycluster                

spec:
  version: "v4.0.8"
  timezone: UTC
  hostNetwork: false
  imagePullPolicy: IfNotPresent

  enableDynamicConfiguration: true

  pd:
    baseImage: pingcap/pd
    config: {}
    replicas: 1
    requests:
      cpu: "100m"
      storage: 1Gi
    mountClusterClientSecret: false
    storageClassName: "local-storage"            

  tidb:
    baseImage: pingcap/tidb
    replicas: 1
    requests:
      cpu: "100m"
    config: {}

  tikv:
    baseImage: pingcap/tikv
    config: {}
    replicas: 1
    requests:
      cpu: "100m"
      storage: 1Gi
    mountClusterClientSecret: false
    storageClassName: "local-storage"          
  enablePVReclaim: false
  pvReclaimPolicy: Retain
  tlsCluster: {}

3.1.3 Create the TiDB Cluster

[root@r21 ~]# kubectl create namespace mycluster
[root@r21 ~]# kubectl apply -f tidb-cluster-sample.yaml

3.1.4 Check the TiDB cluster status

[root@r21 soft]# kubectl get pods -n mycluster -o wide
NAME                                   READY   STATUS    RESTARTS   AGE     IP          NODE            NOMINATED NODE   READINESS GATES
mycluster-discovery-5b7ff9f94f-jdjqp   1/1     Running   0          6m32s   10.0.97.5   192.168.10.21   <none>           <none>
mycluster-pd-0                         1/1     Running   0          6m32s   10.0.97.6   192.168.10.21   <none>           <none>
mycluster-tidb-0                       2/2     Running   0          5m10s   10.0.47.6   192.168.10.22   <none>           <none>
mycluster-tikv-0                       1/1     Running   0          6m5s    10.0.47.5   192.168.10.22   <none>           <none>
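
Besides the pods, the TidbCluster object itself summarizes the state of PD, TiKV and TiDB; a quick check looks like this:

kubectl get tidbcluster -n mycluster
kubectl describe tidbcluster mycluster -n mycluster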

3.1.5 Expose the TiDB Cluster

We can modify the tidb section of tidb-cluster-sample.yaml as follows to expose the TiDB service through a NodePort, then apply the change:

  tidb:
    baseImage: pingcap/tidb
    replicas: 1
    requests:
      cpu: "100m"
    config: {}
    service:
      externalTrafficPolicy: Cluster
      type: NodePort
      mysqlNodePort: 30020
      statusNodePort: 30040
      
      
[root@r21 ~]# kubectl replace -f tidb-cluster-sample.yaml

3.1.6 Check the TiDB endpoints

[root@k8s-master soft]# kubectl get all -n mycluster
NAME                                       READY   STATUS    RESTARTS   AGE
pod/mycluster-discovery-5b7ff9f94f-nfjkn   1/1     Running   1          3h11m
pod/mycluster-pd-0                         1/1     Running   0          144m
pod/mycluster-tidb-0                       2/2     Running   0          136m
pod/mycluster-tikv-0                       1/1     Running   0          143m

NAME                          TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                          AGE
service/mycluster-discovery   ClusterIP   10.0.0.97    <none>        10261/TCP,10262/TCP              3h11m
service/mycluster-pd          ClusterIP   10.0.0.20    <none>        2379/TCP                         3h11m
service/mycluster-pd-peer     ClusterIP   None         <none>        2380/TCP                         3h11m
service/mycluster-tidb        NodePort    10.0.0.105   <none>        4000:30020/TCP,10080:30040/TCP   30m
service/mycluster-tidb-peer   ClusterIP   None         <none>        10080/TCP                        136m
service/mycluster-tikv-peer   ClusterIP   None         <none>        20160/TCP                        3h10m

NAME                                  READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/mycluster-discovery   1/1     1            1           3h11m

NAME                                             DESIRED   CURRENT   READY   AGE
replicaset.apps/mycluster-discovery-5b7ff9f94f   1         1         1       3h11m

NAME                              READY   AGE
statefulset.apps/mycluster-pd     1/1     3h11m
statefulset.apps/mycluster-tidb   1/1     136m
statefulset.apps/mycluster-tikv   1/1     3h10m
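
The statusNodePort (30040) maps to TiDB's HTTP status port 10080, so a quick check from outside the cluster could look like this (any node IP works):

curl http://192.168.10.21:30040/status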

3.2 Test the TiDB Cluster

3.2.1 Connect to TiDB with a mysql client

[root@r20 ~]# mysql -uroot -h192.168.10.21 -P30020
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MySQL connection id is 738
Server version: 5.7.25-TiDB-v4.0.8 TiDB Server (Apache License 2.0) Community Edition, MySQL 5.7 compatible

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MySQL [(none)]> exit
Bye


[root@r20 ~]# mysql -uroot -h192.168.10.22 -P30020
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MySQL connection id is 926
Server version: 5.7.25-TiDB-v4.0.8 TiDB Server (Apache License 2.0) Community Edition, MySQL 5.7 compatible

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MySQL [(none)]> exit
Bye
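
As a final smoke test, a few statements confirm that the cluster serves both reads and writes (the database and table names are illustrative):

mysql -uroot -h192.168.10.21 -P30020 -e "
  SELECT tidb_version();
  CREATE DATABASE IF NOT EXISTS smoke;
  CREATE TABLE IF NOT EXISTS smoke.t (id INT PRIMARY KEY, v VARCHAR(32));
  INSERT INTO smoke.t VALUES (1, 'hello-tidb') ON DUPLICATE KEY UPDATE v = VALUES(v);
  SELECT * FROM smoke.t;
"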
