k8s Notes 011 - StatefulSet Deployment (ZooKeeper Cluster Example)



A Deployment is for stateless applications; a stateful application needs to be deployed with a StatefulSet.
A simple way to understand the difference: a stateful application needs its data persisted to storage, while a stateless application keeps no data and can be scheduled anywhere.

1. Deploy the NFS server
1.1 Install NFS
[root@k8s-node01 ~]# yum -y install nfs-utils
1.2 Enable and start the NFS service
[root@k8s-node01 ~]# systemctl enable nfs
[root@k8s-node01 ~]# systemctl start nfs
1.3 Configure the NFS shared directory
[root@k8s-node01 ~]# fdisk /dev/sdb
[root@k8s-node01 ~]# mkfs.ext4 /dev/sdb1
[root@k8s-node01 ~]# mkdir /storage
[root@k8s-node01 ~]# mount /dev/sdb1 /storage
[root@k8s-node01 ~]# vi /etc/exports
/storage *(rw,sync,no_root_squash,no_all_squash)
[root@k8s-node01 ~]# exportfs -r
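A quick sanity check that the directory is actually exported (assuming the standard nfs-utils tooling is installed):
[root@k8s-node01 ~]# exportfs -v
[root@k8s-node01 ~]# showmount -e localhost
The export list should include /storage with the options configured above.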

2. Deploy the NFS client
2.1 Install NFS on every node
[root@k8s-node02 ~]# yum -y install nfs-utils
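Before wiring this into Kubernetes, it can be worth verifying from a worker node that the share is mountable at all (the mount point /mnt/nfs-test here is just a throwaway example; 172.18.0.144 is the NFS server address used in the PV definitions below):
[root@k8s-node02 ~]# mkdir -p /mnt/nfs-test
[root@k8s-node02 ~]# mount -t nfs 172.18.0.144:/storage /mnt/nfs-test
[root@k8s-node02 ~]# umount /mnt/nfs-test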

3. Build the ZooKeeper image
3.1 Write the Dockerfile
3.2 Write the other scripts
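The Dockerfile and helper scripts are not reproduced here. Purely as an illustrative sketch (the base image, ZooKeeper version, and script contents are assumptions; only the install path /opt/app/zookeeper, the data path /opt/data/zookeeper, and the scripts zkGenConfig.sh / zkOk.sh / zkMetrics.sh referenced by the StatefulSet below are taken from this article), the Dockerfile might look roughly like:
FROM centos:7
# JDK needed to run ZooKeeper
RUN yum -y install java-1.8.0-openjdk && yum clean all
# Unpack ZooKeeper under /opt/app/zookeeper (version is an assumption)
ADD zookeeper-3.4.10.tar.gz /opt/app/
RUN mv /opt/app/zookeeper-3.4.10 /opt/app/zookeeper && mkdir -p /opt/data/zookeeper
# Helper scripts used by the StatefulSet command and probes
COPY zkGenConfig.sh zkOk.sh zkMetrics.sh /opt/app/zookeeper/bin/
RUN chmod +x /opt/app/zookeeper/bin/zk*.sh
# The probes call zkOk.sh without a path, so the bin directory must be on PATH
ENV PATH="${PATH}:/opt/app/zookeeper/bin"
EXPOSE 2181 2888 3888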
3.3 Build the image
[root@harbor zookeeper]# docker build -t 192.168.1.170/tzg-prod/zookeeper:170915 .
[root@harbor zookeeper]# docker tag 192.168.1.170/tzg-prod/zookeeper:170915 192.168.1.170/tzg-prod/zookeeper
3.4 Push the image to the private registry
[root@harbor zookeeper]# docker push 192.168.1.170/tzg-prod/zookeeper

4. Create each ZooKeeper node's data directory in the NFS shared directory
[root@k8s-node01 ~]# mkdir /storage/{zk-0,zk-1,zk-2}

5. Create the PersistentVolumes
[root@k8s-master01 zk-cluster]# vim zookeeper-persistentvolume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zkdatadir-zk-0
spec:
  capacity:
    storage: 20Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 172.18.0.144
    path: /storage/zk-0
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zkdatadir-zk-1
spec:
  capacity:
    storage: 20Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 172.18.0.144
    path: /storage/zk-1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zkdatadir-zk-2
spec:
  capacity:
    storage: 20Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 172.18.0.144
    path: /storage/zk-2
[root@k8s-master01 zk-cluster]# kubectl create -f zookeeper-persistentvolume.yaml

6. Create the PersistentVolumeClaims
[root@k8s-master01 zk-cluster]# vim zookeeper-persistentvolumeclaim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zkdatadir-zk-0
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zkdatadir-zk-1
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zkdatadir-zk-2
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
[root@k8s-master01 zk-cluster]# kubectl create -f zookeeper-persistentvolumeclaim.yaml

7. Check the created PVs and PVCs
[root@k8s-master01 zk-cluster]# kubectl get pv
NAME             CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS    CLAIM                    STORAGECLASS   REASON    AGE
zkdatadir-zk-0   20Gi       RWO           Retain          Bound     default/zkdatadir-zk-1                            1h
zkdatadir-zk-1   20Gi       RWO           Retain          Bound     default/zkdatadir-zk-2                            1h
zkdatadir-zk-2   20Gi       RWO           Retain          Bound     default/zkdatadir-zk-0                            1h
[root@k8s-master01 zk-cluster]# kubectl get pvc
NAME             STATUS    VOLUME           CAPACITY   ACCESSMODES   STORAGECLASS   AGE
zkdatadir-zk-0   Bound     zkdatadir-zk-2   20Gi       RWO                          1h
zkdatadir-zk-1   Bound     zkdatadir-zk-0   20Gi       RWO                          1h
zkdatadir-zk-2   Bound     zkdatadir-zk-1   20Gi       RWO                          1h
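Note that the binding is not matched by name: the controller binds a claim to any available PV that satisfies the requested size and access mode, which is why the claim zkdatadir-zk-0 ended up on a differently numbered volume above. For a fresh cluster this is harmless. If a strict one-to-one mapping is wanted, a claim can name its volume explicitly via spec.volumeName, for example (illustrative sketch for the first claim only):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zkdatadir-zk-0
spec:
  volumeName: zkdatadir-zk-0
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi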

8. Create the Service, ConfigMap, PodDisruptionBudget, and StatefulSet
[root@k8s-master01 zk-cluster]# vim zookeeper.yaml
apiVersion: v1
kind: Service
metadata:
  name: zk-headless
  labels:
    app: zk-headless
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: zk-config
data:
  replicas: "3"
  ensemble: "zk-0;zk-1;zk-2"
  jvm.heap: "512M"
  tick: "2000"
  init: "10"
  sync: "5"
  client.cnxns: "10000"
  snap.retain: "3"
  purge.interval: "1"
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-budget
spec:
  selector:
    matchLabels:
      app: zk
  minAvailable: 2
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: zk
spec:
  serviceName: zk-headless
  replicas: 3
  template:
    metadata:
      labels:
        app: zk
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - zk
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: k8szk
        imagePullPolicy: Always
        image: 192.168.1.170/tzg-prod/zookeeper
        resources:
          requests:
            memory: "1Gi"
            cpu: "1"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        env:
        - name: ZK_REPLICAS
          valueFrom:
            configMapKeyRef:
              name: zk-config
              key: replicas
        - name: ZK_ENSEMBLE
          valueFrom:
            configMapKeyRef:
              name: zk-config
              key: ensemble
        - name: ZK_HEAP_SIZE
          valueFrom:
            configMapKeyRef:
              name: zk-config
              key: jvm.heap
        - name: ZK_TICK_TIME
          valueFrom:
            configMapKeyRef:
              name: zk-config
              key: tick
        - name: ZK_INIT_LIMIT
          valueFrom:
            configMapKeyRef:
              name: zk-config
              key: init
        - name: ZK_SYNC_TIME
          valueFrom:
            configMapKeyRef:
              name: zk-config
              key: sync
        - name: ZK_MAX_CLIENT_CNXNS
          valueFrom:
            configMapKeyRef:
              name: zk-config
              key: client.cnxns
        - name: ZK_SNAP_RETAIN_COUNT
          valueFrom:
            configMapKeyRef:
              name: zk-config
              key: snap.retain
        - name: ZK_PURGE_INTERVAL
          valueFrom:
            configMapKeyRef:
              name: zk-config
              key: purge.interval
        - name: ZK_CLIENT_PORT
          value: "2181"
        - name: ZK_SERVER_PORT
          value: "2888"
        - name: ZK_ELECTION_PORT
          value: "3888"
        command:
        - sh
        - -c
        - /opt/app/zookeeper/bin/zkGenConfig.sh && /opt/app/zookeeper/bin/zkServer.sh start-foreground
        readinessProbe:
          exec:
            command:
            - "zkOk.sh"
          initialDelaySeconds: 15
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - "zkOk.sh"
          initialDelaySeconds: 15
          timeoutSeconds: 5
        volumeMounts:
        - name: zkdatadir
          mountPath: /opt/data/zookeeper
  volumeClaimTemplates:
  - metadata:
      name: zkdatadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 20Gi
[root@k8s-master01 zk-cluster]# kubectl create -f zookeeper.yaml
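StatefulSet pods are created strictly in ordinal order (zk-0 first, then zk-1, then zk-2, each waiting for the previous one to be Running and Ready), which can be watched while the cluster comes up:
[root@k8s-master01 zk-cluster]# kubectl get pods -w -l app=zk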

9. Check the ZooKeeper cluster pods
[root@k8s-master01 zk-cluster]# kubectl get svc zk-headless
NAME          CLUSTER-IP   EXTERNAL-IP   PORT(S)             AGE
zk-headless   None         <none>        2888/TCP,3888/TCP   45m
[root@k8s-master01 zk-cluster]# kubectl get statefulset zk
NAME      DESIRED   CURRENT   AGE
zk        3         3         45m
[root@k8s-master01 zk-cluster]# kubectl get pods
NAME      READY     STATUS    RESTARTS   AGE
zk-0      1/1       Running   0          20m
zk-1      1/1       Running   0          20m
zk-2      1/1       Running   0          8m
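Each pod keeps a stable identity: its hostname equals the pod name, which is exactly what the ensemble list zk-0;zk-1;zk-2 in the ConfigMap relies on. A quick check (assuming the image ships the hostname command):
[root@k8s-master01 zk-cluster]# for i in 0 1 2; do kubectl exec zk-$i -- hostname; done
This should print zk-0, zk-1 and zk-2.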

10. Verify the ZooKeeper cluster
[root@k8s-master01 zk-cluster]# kubectl exec zk-0 -- /opt/app/zookeeper/bin/zkMetrics.sh | grep zk_server_state
zk_server_state follower
[root@k8s-master01 zk-cluster]# kubectl exec zk-1 -- /opt/app/zookeeper/bin/zkMetrics.sh | grep zk_server_state
zk_server_state leader
[root@k8s-master01 zk-cluster]# kubectl exec zk-2 -- /opt/app/zookeeper/bin/zkMetrics.sh | grep zk_server_state
zk_server_state follower
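As a further functional check, a znode created on one member should be readable from another (assuming the stock zkCli.sh is present in the image at the same path as the other ZooKeeper scripts):
[root@k8s-master01 zk-cluster]# kubectl exec zk-0 -- /opt/app/zookeeper/bin/zkCli.sh create /hello world
[root@k8s-master01 zk-cluster]# kubectl exec zk-1 -- /opt/app/zookeeper/bin/zkCli.sh get /hello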
