[SOP Series 39] Deploying TiDB with TiDB Operator

Thanks to @清风明月 for the contribution

Deploy TiDB Operator

Download the TiDB Cluster CRD manifest

wget https://raw.githubusercontent.com/pingcap/tidb-operator/master/manifests/crd.yaml

Create the TiDB Cluster CRDs

kubectl apply -f crd.yaml
customresourcedefinition.apiextensions.k8s.io/tidbclusters.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/backups.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/restores.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/backupschedules.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbmonitors.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbinitializers.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbclusterautoscalers.pingcap.com created

Check the CRD status

[root@k8s-master ~]# kubectl get crd |grep ping
backups.pingcap.com                         2022-03-28T07:46:10Z
backupschedules.pingcap.com                 2022-03-28T07:46:10Z
dmclusters.pingcap.com                      2022-03-28T07:46:11Z
restores.pingcap.com                        2022-03-28T07:46:11Z
tidbclusterautoscalers.pingcap.com          2022-03-28T07:46:11Z
tidbclusters.pingcap.com                    2022-03-28T07:46:11Z
tidbinitializers.pingcap.com                2022-03-28T07:46:12Z
tidbmonitors.pingcap.com                    2022-03-28T07:46:12Z
tidbngmonitorings.pingcap.com               2022-03-28T07:46:12Z

Install and configure TiDB Operator

Method 1: via YAML manifests

Pull the TiDB Operator docker images

docker pull pingcap/tidb-operator:v1.1.7
docker pull pingcap/tidb-backup-manager:v1.1.7
docker pull pingcap/advanced-statefulset:v0.3.3


mkdir -p /opt/soft/docker-image
cd /opt/soft/docker-image
docker save -o tidb-backup-manager.tar pingcap/tidb-backup-manager:v1.1.7
docker save -o tidb-operator.tar pingcap/tidb-operator:v1.1.7
docker save -o advanced-statefulset.tar pingcap/advanced-statefulset:v0.3.3
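
If the Kubernetes nodes cannot pull from Docker Hub directly, copy the tarballs to each node and load them there. A minimal sketch, assuming a node named k8s-node1 (the node name and target path are assumptions) and the /opt/soft/docker-image directory created above:

scp /opt/soft/docker-image/*.tar root@k8s-node1:/opt/soft/
ssh root@k8s-node1 'for f in /opt/soft/*.tar; do docker load -i "$f"; done'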

Create the tidb-operator deployment manifest

cat tidb-operator-deploy.yaml

-----------------------------------------------------------------------------------
# Source: tidb-operator/templates/scheduler-policy-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: tidb-scheduler-policy
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
data:
  policy.cfg: |-
    {
      "kind" : "Policy",
      "apiVersion" : "v1",
      "predicates": [
        {"name": "NoVolumeZoneConflict"},
        {"name": "MaxEBSVolumeCount"},
        {"name": "MaxAzureDiskVolumeCount"},
        {"name": "NoDiskConflict"},
        {"name": "GeneralPredicates"},
        {"name": "PodToleratesNodeTaints"},
        {"name": "CheckVolumeBinding"},
        {"name": "MaxGCEPDVolumeCount"},
        {"name": "MatchInterPodAffinity"},
        {"name": "CheckVolumeBinding"}
      ],
      "priorities": [
        {"name": "SelectorSpreadPriority", "weight": 1},
        {"name": "InterPodAffinityPriority", "weight": 1},
        {"name": "LeastRequestedPriority", "weight": 1},
        {"name": "BalancedResourceAllocation", "weight": 1},
        {"name": "NodePreferAvoidPodsPriority", "weight": 1},
        {"name": "NodeAffinityPriority", "weight": 1},
        {"name": "TaintTolerationPriority", "weight": 1}
      ],
      "extenders": [
        {
          "urlPrefix": "http://127.0.0.1:10262/scheduler",
          "filterVerb": "filter",
          "preemptVerb": "preempt",
          "weight": 1,
          "httpTimeout": 30000000000,
          "enableHttps": false
        }
      ]
    }


---
# Source: tidb-operator/templates/controller-manager-rbac.yaml

kind: ServiceAccount
apiVersion: v1
metadata:
  name: tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
rules:
- apiGroups: [""]
  resources:
  - services
  - events
  verbs: ["*"]
- apiGroups: [""]
  resources: ["endpoints","configmaps"]
  verbs: ["create", "get", "list", "watch", "update","delete"]
- apiGroups: [""]
  resources: ["serviceaccounts"]
  verbs: ["create","get","update","delete"]
- apiGroups: ["batch"]
  resources: ["jobs"]
  verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create", "update", "get", "list", "watch","delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch","update", "delete"]
- apiGroups: ["apps"]
  resources: ["statefulsets","deployments", "controllerrevisions"]
  verbs: ["*"]
- apiGroups: ["extensions"]
  resources: ["ingresses"]
  verbs: ["*"]
- apiGroups: ["apps.pingcap.com"]
  resources: ["statefulsets", "statefulsets/status"]
  verbs: ["*"]
- apiGroups: ["pingcap.com"]
  resources: ["*"]
  verbs: ["*"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "patch","update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]

- apiGroups: ["rbac.authorization.k8s.io"]
  resources: ["clusterroles", "roles"]
  verbs: ["escalate","create","get","update", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
  resources: ["rolebindings","clusterrolebindings"]
  verbs: ["create","get","update", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-controller-manager
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: tidb-operator:tidb-controller-manager
  apiGroup: rbac.authorization.k8s.io

---
# Source: tidb-operator/templates/scheduler-rbac.yaml

kind: ServiceAccount
apiVersion: v1
metadata:
  name: tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
rules:
# ConfigMap permission for --policy-configmap
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list"]
- apiGroups: ["pingcap.com"]
  resources: ["tidbclusters"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "update"]
# Extra permissions for endpoints other than kube-scheduler
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["delete", "get", "patch", "update"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  resourceNames: ["tidb-scheduler"]
  verbs: ["get", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-scheduler
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: tidb-operator:tidb-scheduler
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:kube-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-scheduler
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: system:kube-scheduler
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: tidb-operator:volume-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
subjects:
- kind: ServiceAccount
  name: tidb-scheduler
  namespace: tidb-admin
roleRef:
  kind: ClusterRole
  name: system:volume-scheduler
  apiGroup: rbac.authorization.k8s.io

---
# Source: tidb-operator/templates/controller-manager-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tidb-controller-manager
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: controller-manager
    helm.sh/chart: tidb-operator-v1.1.7
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: tidb-operator
      app.kubernetes.io/instance: tidb-operator
      app.kubernetes.io/component: controller-manager
  template:
    metadata:
      labels:
        app.kubernetes.io/name: tidb-operator
        app.kubernetes.io/instance: tidb-operator
        app.kubernetes.io/component: controller-manager
    spec:
      serviceAccount: tidb-controller-manager
      containers:
      - name: tidb-operator
        image: pingcap/tidb-operator:v1.1.7
        imagePullPolicy: IfNotPresent
        resources:
            requests:
              cpu: 80m
              memory: 50Mi

        command:
          - /usr/local/bin/tidb-controller-manager
          - -tidb-backup-manager-image=pingcap/tidb-backup-manager:v1.1.7
          - -tidb-discovery-image=pingcap/tidb-operator:v1.1.7
          - -cluster-scoped=true
          - -auto-failover=true
          - -pd-failover-period=5m
          - -tikv-failover-period=5m
          - -tiflash-failover-period=5m
          - -tidb-failover-period=5m
          - -v=2
        env:
          - name: NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
          - name: TZ
            value: UTC


---
# Source: tidb-operator/templates/scheduler-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tidb-scheduler
  labels:
    app.kubernetes.io/name: tidb-operator
    app.kubernetes.io/managed-by: Tiller
    app.kubernetes.io/instance: tidb-operator
    app.kubernetes.io/component: scheduler
    helm.sh/chart: tidb-operator-v1.1.7
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: tidb-operator
      app.kubernetes.io/instance: tidb-operator
      app.kubernetes.io/component: scheduler
  template:
    metadata:
      labels:
        app.kubernetes.io/name: tidb-operator
        app.kubernetes.io/instance: tidb-operator
        app.kubernetes.io/component: scheduler
    spec:
      serviceAccount: tidb-scheduler
      containers:
      - name: tidb-scheduler
        image: pingcap/tidb-operator:v1.1.7
        imagePullPolicy: IfNotPresent
        resources:
            limits:
              cpu: 250m
              memory: 150Mi
            requests:
              cpu: 80m
              memory: 50Mi

        command:
          - /usr/local/bin/tidb-scheduler
          - -v=2
          - -port=10262
      - name: kube-scheduler
        image: k8s.gcr.io/kube-scheduler:v1.14.0
        imagePullPolicy: IfNotPresent
        resources:
            limits:
              cpu: 250m
              memory: 150Mi
            requests:
              cpu: 80m
              memory: 50Mi

        command:
        - kube-scheduler
        - --port=10261
        - --leader-elect=true
        - --lock-object-name=tidb-scheduler
        - --lock-object-namespace=tidb-admin
        - --scheduler-name=tidb-scheduler
        - --v=2
        - --policy-configmap=tidb-scheduler-policy
        - --policy-configmap-namespace=tidb-admin

Deploy tidb-operator

## create tidb-admin namespace
[root@r21 soft]# kubectl create namespace tidb-admin
## create tidb-operator 
[root@r21 soft]# kubectl apply -f tidb-operator-deploy.yaml -n tidb-admin

Check the tidb-operator status

[root@k8s-master tidb-operator]# kubectl get pods -n tidb-admin
NAME                                       READY   STATUS    RESTARTS   AGE
tidb-controller-manager-6fb99fdb64-5zssz   1/1     Running   28         13d
tidb-scheduler-9f9c785c5-x42b4             2/2     Running   17         29d
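
If either pod is stuck outside the Running state, the controller-manager logs and the scheduler pod events are the usual first stops, for example:

kubectl -n tidb-admin logs deploy/tidb-controller-manager --tail=50
kubectl -n tidb-admin describe pod -l app.kubernetes.io/component=scheduler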

Method 2: via Helm

Add the PingCAP Helm repository

helm repo add pingcap https://charts.pingcap.org/

Create the namespace

kubectl create namespace tidb-admin

Install tidb-operator

helm install --namespace tidb-admin tidb-operator pingcap/tidb-operator --version v1.3.3 \
    --set operatorImage=registry.cn-beijing.aliyuncs.com/tidb/tidb-operator:v1.3.3 \
    --set tidbBackupManagerImage=registry.cn-beijing.aliyuncs.com/tidb/tidb-backup-manager:v1.3.3 \
    --set scheduler.kubeSchedulerImageName=registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler
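
The image overrides above point at mirror registries for environments that cannot reach Docker Hub or k8s.gcr.io; with unrestricted network access the chart defaults are enough. A minimal sketch:

helm install --namespace tidb-admin tidb-operator pingcap/tidb-operator --version v1.3.3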

Check that tidb-operator is running properly

[root@k8s-master tidb-operator]# kubectl get pods --namespace tidb-admin -l app.kubernetes.io/instance=tidb-operator
NAME                                       READY   STATUS    RESTARTS   AGE
tidb-controller-manager-6fb99fdb64-5zssz   1/1     Running   28         13d
tidb-scheduler-9f9c785c5-x42b4             2/2     Running   17         29d

Create PVs for the TiDB Cluster

There are two ways to create PVs. One is Static Provisioning, where PVs are created and managed manually. The other is Dynamic Provisioning, where volumes are created on demand; the key to dynamic provisioning is the StorageClass, which acts as a template for creating PVs. Local PVs are currently the most common choice, and the local-volume-provisioner program from the local-static-provisioner project can be used to create the local storage objects.
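
TiDB Operator requests storage by creating PVCs against a StorageClass, and with local volumes nothing binds until a pod that uses the claim is scheduled (the local-storage StorageClass defined later in this post uses WaitForFirstConsumer). A minimal sketch of such a claim, shown only to illustrate the binding; the operator generates the real PVCs itself and the claim name here is hypothetical:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-pvc
  namespace: tidb
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: local-storage
  resources:
    requests:
      storage: 5Gi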

Option 1: Static Provisioning (manually managed PVs)

Create the PVs

1. Create the directories on each worker node; run the following on every compute node:
for i in `seq 9`; do mkdir -p /home/data/pv0$i; done
2. Run the PV creation statements on the K8S master node, one loop per node (a combined sketch follows the three loops).

[root@r22 disks]# for i in `seq 9`; do
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tidb-cluster-r22-pv0${i}
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /home/data/pv0${i}
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - 172.16.4.121
EOF
done


[root@r22 disks]# for i in `seq 9`; do
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tidb-cluster-r24-pv0${i}
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /home/data/pv0${i}
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - 172.16.4.203
EOF
done

[root@r22 disks]# for i in `seq 9`; do
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tidb-cluster-r23-pv0${i}
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /home/data/pv0${i}
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - 172.16.7.210
EOF
done
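
The three loops above differ only in the node name used in the PV prefix and the node address in nodeAffinity; a hedged sketch that folds them into one loop (the node-to-address pairs are taken from the loops above):

for node in r22:172.16.4.121 r24:172.16.4.203 r23:172.16.7.210; do
  name=${node%%:*}; addr=${node##*:}
  for i in `seq 9`; do
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tidb-cluster-${name}-pv0${i}
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /home/data/pv0${i}
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - ${addr}
EOF
  done
done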

Check the PV status

[root@r22 soft]# kubectl get pv
NAME                    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS    REASON   AGE
tidb-cluster-r22-pv01   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv02   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv03   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv04   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv05   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv06   5Gi        RWO            Delete           Available           local-storage            42s
tidb-cluster-r22-pv07   5Gi        RWO            Delete           Available           local-storage            41s
tidb-cluster-r22-pv08   5Gi        RWO            Delete           Available           local-storage            41s
tidb-cluster-r22-pv09   5Gi        RWO            Delete           Available           local-storage            41s
tidb-cluster-r23-pv01   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv02   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv03   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv04   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv05   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv06   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv07   5Gi        RWO            Delete           Available           local-storage            33s
tidb-cluster-r23-pv08   5Gi        RWO            Delete           Available           local-storage            32s
tidb-cluster-r23-pv09   5Gi        RWO            Delete           Available           local-storage            32s
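
To confirm that a PV carries the intended nodeAffinity and local path, describe one of them and compare it with the spec that was submitted:

kubectl describe pv tidb-cluster-r22-pv01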

Option 2: via local-volume-provisioner

Prepare the local storage

##Run the following on each K8S compute node in turn
1. Create the data directories
 mkdir -p /home/data/{data1,data2,data3}
2. Bind-mount each directory onto itself
 mount --bind /home/data/data1/ /home/data/data1/
 mount --bind /home/data/data2/ /home/data/data2/
 mount --bind /home/data/data3/ /home/data/data3/
 ###
 /home/data is the discovery directory used by local-volume-provisioner (see the
 ConfigMap below); the provisioner creates one PV for each mounted subdirectory
 under it, i.e. data1, data2 and data3 here
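
Bind mounts created this way do not survive a reboot; if they should persist, matching entries can also be added to /etc/fstab, for example for the first directory:

echo '/home/data/data1 /home/data/data1 none bind 0 0' >> /etc/fstab
mount -a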

Download and configure local-volume-provisioner

1. Download
wget https://raw.githubusercontent.com/pingcap/tidb-operator/master/examples/local-pv/local-volume-provisioner.yaml

2. Adjust the paths. If your discovery directory differs from the one prepared in the previous step, modify the ConfigMap and DaemonSet definitions accordingly.
#cat local-volume-provisioner.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: "local-storage"
provisioner: "kubernetes.io/no-provisioner"
volumeBindingMode: "WaitForFirstConsumer"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-provisioner-config
  namespace: kube-system
data:
  setPVOwnerRef: "true"
  nodeLabelsForPV: |
    - kubernetes.io/hostname
  storageClassMap: |
    local-storage:
      hostDir: /home/data
      mountDir: /data
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: local-volume-provisioner
  namespace: kube-system
  labels:
    app: local-volume-provisioner
spec:
  selector:
    matchLabels:
      app: local-volume-provisioner
  template:
    metadata:
      labels:
        app: local-volume-provisioner
    spec:
      serviceAccountName: local-storage-admin
      containers:
        - image: "quay.io/external_storage/local-volume-provisioner:v2.3.4"
          name: provisioner
          securityContext:
            privileged: true
          env:
          - name: MY_NODE_NAME
            valueFrom:
              fieldRef:
                fieldPath: spec.nodeName
          - name: MY_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
          - name: JOB_CONTAINER_IMAGE
            value: "quay.io/external_storage/local-volume-provisioner:v2.3.4"
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
            limits:
              cpu: 100m
              memory: 100Mi
          volumeMounts:
            - mountPath: /etc/provisioner/config
              name: provisioner-config
              readOnly: true
            - mountPath: /data
              name: local-disks
              mountPropagation: "HostToContainer"
      volumes:
        - name: provisioner-config
          configMap:
            name: local-provisioner-config
        - name: local-disks
          hostPath:
            path: /home/data
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-storage-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-pv-binding
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: local-storage-admin
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: system:persistent-volume-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-storage-provisioner-node-clusterrole
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-node-binding
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: local-storage-admin
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: local-storage-provisioner-node-clusterrole
  apiGroup: rbac.authorization.k8s.io

Deploy and verify local-volume-provisioner

After deployment, a PV is created for each discovered mount point.

1. Deploy
[root@k8s-master tidb]# kubectl apply -f local-volume-provisioner.yaml
storageclass.storage.k8s.io/local-storage unchanged
configmap/local-provisioner-config unchanged
daemonset.apps/local-volume-provisioner unchanged
serviceaccount/local-storage-admin unchanged
clusterrolebinding.rbac.authorization.k8s.io/local-storage-provisioner-pv-binding unchanged
clusterrole.rbac.authorization.k8s.io/local-storage-provisioner-node-clusterrole unchanged
clusterrolebinding.rbac.authorization.k8s.io/local-storage-provisioner-node-binding unchanged

2. Check PV and pod status
[root@k8s-master tidb]# kubectl get po -n kube-system -l app=local-volume-provisioner && kubectl get pv
NAME                             READY   STATUS    RESTARTS   AGE
local-volume-provisioner-dv8td   1/1     Running   0          3m53s
local-volume-provisioner-p5xxz   1/1     Running   0          3m53s
local-volume-provisioner-xc7h7   1/1     Running   1          3m53s
NAME                             CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                                                     STORAGECLASS               REASON   AGE
local-pv                         5Gi        RWO            Delete           Bound       pvc/local-pvc                                             local-pv                            34d
local-pv-266e82b                 446Gi      RWO            Delete           Available                                                             ssd-storage                         3m52s
local-pv-5e0c8732                446Gi      RWO            Delete           Available                                                             ssd-storage                         3m52s
local-pv-740db545                446Gi      RWO            Retain           Bound       tidb/tikv-lqbyz-01-tikv-0                                 ssd-storage                         3m52s
local-pv-940c400c                446Gi      RWO            Retain           Bound       tidb/pd-lqbyz-01-pd-0                                     ssd-storage                         3m52s
local-pv-976ea2a9                49Gi       RWO            Delete           Available                                                             ssd-storage                         3m51s
local-pv-aab278a2                446Gi      RWO            Delete           Available                                                             ssd-storage                         3m52s
local-pv-e10ff5b6                49Gi       RWO            Delete           Available                                                             ssd-storage                         3m51s
local-pv-e45a90b5                446Gi      RWO            Delete           Available                                                             ssd-storage                         3m52s
local-pv-fd9e6dff                49Gi       RWO            Delete           Available                                                             ssd-storage                         3m51s   

Deploy and test the TiDB Cluster

Pull the required docker images

docker pull pingcap/pd:v5.4.1
docker pull pingcap/tikv:v5.4.1
docker pull pingcap/tidb:v5.4.1
docker pull pingcap/tidb-binlog:v5.4.1
docker pull pingcap/ticdc:v5.4.1
docker pull pingcap/tiflash:v5.4.1
docker pull pingcap/tidb-monitor-reloader:v1.0.1
docker pull pingcap/tidb-monitor-initializer:v5.4.1
docker pull grafana/grafana:6.0.1
docker pull prom/prometheus:v2.18.1
docker pull busybox:1.26.2

docker save -o pd-v5.4.1.tar pingcap/pd:v5.4.1
docker save -o tikv-v5.4.1.tar pingcap/tikv:v5.4.1
docker save -o tidb-v5.4.1.tar pingcap/tidb:v5.4.1
docker save -o tidb-binlog-v5.4.1.tar pingcap/tidb-binlog:v5.4.1
docker save -o ticdc-v5.4.1.tar pingcap/ticdc:v5.4.1
docker save -o tiflash-v5.4.1.tar pingcap/tiflash:v5.4.1
docker save -o tidb-monitor-reloader-v1.0.1.tar pingcap/tidb-monitor-reloader:v1.0.1
docker save -o tidb-monitor-initializer-v5.4.1.tar pingcap/tidb-monitor-initializer:v5.4.1
docker save -o grafana-6.0.1.tar grafana/grafana:6.0.1
docker save -o prometheus-v2.18.1.tar prom/prometheus:v2.18.1
docker save -o busybox-1.26.2.tar busybox:1.26.2
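
After copying these tarballs to each Kubernetes node and loading them (the same docker load approach as for the operator images above), a quick check that the images are present:

docker images | grep -E 'pingcap|grafana|prometheus|busybox'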

Download the TiDB deployment YAML file

wget https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.7/examples/advanced/tidb-cluster.yaml

Edit the configuration as needed

[root@k8s-master tidb]# cat tidb-cluster-sample.yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster 
metadata:
  name: mycluster
  namespace: tidb

spec:
  version: "v4.0.8"
  timezone: Asia/Shanghai
  hostNetwork: false
  imagePullPolicy: IfNotPresent

  enableDynamicConfiguration: true

  pd:
    baseImage: pingcap/pd
    config: {}
    replicas: 3
    requests:
      cpu: "100m"
      storage: 12Gi
    mountClusterClientSecret: false
    storageClassName: "local-storage"

  tidb:
    baseImage: pingcap/tidb
    replicas: 2
    requests:
      cpu: "100m"
    config: {}
    service:
      externalTrafficPolicy: Cluster
      type: NodePort
      mysqlNodePort: 30020
      statusNodePort: 30040

  tikv:
    baseImage: pingcap/tikv
    config: {}
    replicas: 3
    requests:
      cpu: "100m"
      storage: 12Gi
    mountClusterClientSecret: false
    storageClassName: "local-storage"
  enablePVReclaim: false
  pvReclaimPolicy: Retain
  tlsCluster: {}

Create the TiDB cluster and check its status

[root@k8s-master tidb]# kubectl create namespace tidb
[root@k8s-master tidb]# kubectl apply -f tidb-cluster-sample.yaml
[root@k8s-master tidb]#  kubectl get pods -ntidb -o wide
NAME                                 READY   STATUS    RESTARTS   AGE   IP             NODE        NOMINATED NODE   READINESS GATES
mycluster-discovery-9d4fd98f-vjmxr   1/1     Running   0          29h   10.244.2.166   k8s-node2   <none>           <none>
mycluster-pd-0                       1/1     Running   0          29h   10.244.3.49    k8s-node1   <none>           <none>
mycluster-pd-1                       1/1     Running   1          28h   10.244.3.51    k8s-node1   <none>           <none>
mycluster-pd-2                       1/1     Running   0          28h   10.244.3.52    k8s-node1   <none>           <none>
mycluster-tidb-0                     2/2     Running   0          28h   10.244.2.170   k8s-node2   <none>           <none>
mycluster-tidb-1                     2/2     Running   0          28h   10.244.3.50    k8s-node1   <none>           <none>
mycluster-tikv-0                     1/1     Running   0          28h   10.244.2.167   k8s-node2   <none>           <none>
mycluster-tikv-1                     1/1     Running   0          28h   10.244.2.168   k8s-node2   <none>           <none>
mycluster-tikv-2                     1/1     Running   0          28h   10.244.2.169   k8s-node2   <none>           <none>
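
The overall cluster state can also be inspected through the TidbCluster object itself (the CRD registers tc as a short name):

kubectl get tidbcluster -n tidb
kubectl describe tidbcluster mycluster -n tidb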

Check the TiDB connection address

[root@k8s-master ~]# kubectl get svc -n tidb
NAME                  TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                          AGE
mycluster-discovery   ClusterIP   10.109.245.112   <none>        10261/TCP,10262/TCP              29h
mycluster-pd          ClusterIP   10.110.11.225    <none>        2379/TCP                         29h
mycluster-pd-peer     ClusterIP   None             <none>        2380/TCP,2379/TCP                29h
mycluster-tidb        NodePort    10.107.15.116    <none>        4000:30020/TCP,10080:30040/TCP   29h
mycluster-tidb-peer   ClusterIP   None             <none>        10080/TCP                        29h
mycluster-tikv-peer   ClusterIP   None             <none>        20160/TCP                        29h
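
mycluster-tidb is exposed as a NodePort service (4000:30020). If the node IPs are not reachable from the client machine, kubectl port-forward is an alternative for a quick test (the local port 4000 is an assumption):

kubectl port-forward -n tidb svc/mycluster-tidb 4000:4000 &
mysql -uroot -h127.0.0.1 -P4000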

[root@k8s-master tidb]# mysql -uroot -h172.16.4.169 -P30020
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MySQL connection id is 10455
Server version: 5.7.25-TiDB-v4.0.8 TiDB Server (Apache License 2.0) Community Edition, MySQL 5.7 compatible

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MySQL [(none)]> 
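
A quick smoke test once connected, using the same NodePort address as above; the database and table names are arbitrary examples:

mysql -uroot -h172.16.4.169 -P30020 -e "SELECT tidb_version();"
mysql -uroot -h172.16.4.169 -P30020 -e "CREATE DATABASE IF NOT EXISTS smoke_test; CREATE TABLE smoke_test.t (id INT PRIMARY KEY, v VARCHAR(32)); INSERT INTO smoke_test.t VALUES (1,'hello'); SELECT * FROM smoke_test.t; DROP DATABASE smoke_test;"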

I haven't installed this under docker yet; I'll find time to play with it. Can it run on a Mac with 16 GB of RAM?

https://docs.pingcap.com/zh/tidb/stable/hardware-and-software-requirements

I checked the relevant docs; the resource configuration should follow this:

Production:

For personal testing:
