Using GlusterFS storage with k8s
### k8s glusterfs
#####################################################
##################################################### Install on all nodes
yum install -y centos-release-gluster
yum install glusterfs-server -y
# Install glusterfs on all three nodes
##yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma
# Configure the GlusterFS cluster:
# Start GlusterFS
systemctl restart glusterd.service
systemctl enable glusterd.service
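# Optional sanity check (a quick sketch): confirm glusterd is active on every node before peering.
systemctl is-active glusterd.service
gluster --version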
mkdir -p /gfs1  # brick directory used by the gluster volume created below
#### On the primary node only
gluster peer probe 192.168.3.223
gluster peer probe 192.168.3.224
gluster peer probe 192.168.3.225
gluster volume create gv1 replica 3 transport tcp 192.168.3.223:/gfs1 192.168.3.224:/gfs1 192.168.3.225:/gfs1 force
gluster volume start gv1  # the volume must be started before clients can mount it
gluster peer status
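# Optional verification sketch: the volume should report "Status: Started" with three bricks.
gluster volume info gv1
gluster volume status gv1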
################### Mount the volume on all client nodes
yum install -y centos-release-gluster
yum install -y glusterfs glusterfs-fuse
mkdir -p /gv1
mount -t glusterfs localhost:/gv1 /gv1
echo 'localhost:/gv1 /gv1 glusterfs _netdev,rw,acl 0 0' >>/etc/fstab
mkdir -p /gv1/pydir  # create the subdirectory the PersistentVolume below points at (path "gv1/pydir")
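# Optional: confirm the fuse mount is live before wiring it into Kubernetes.
df -hT /gv1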
#####################################################
##################################################### Configure GlusterFS storage (run on one control-plane node only)
echo '
apiVersion: v1
kind: Endpoints
metadata:
  name: gfs
  namespace: default
subsets:
- addresses:
  - ip: 192.168.3.223
  - ip: 192.168.3.224
  - ip: 192.168.3.225
  ports:
  - port: 49152
    protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: gfs
  namespace: default
spec:
  ports:
  - port: 49152
    protocol: TCP
    targetPort: 49152
  sessionAffinity: None
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gfs-gv1-pydir-pv
  labels:
    type: glusterfs
spec:
  storageClassName: gv1
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteMany
  glusterfs:
    endpoints: "gfs"
    path: "gv1/pydir"
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gfs-gv1-pydir-pvc
  namespace: default
spec:
  storageClassName: gv1
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
' >gluster_pydir.yaml
kubectl apply -f gluster_pydir.yaml
kubectl get pv,pvc
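# A quick check (sketch): the claim should report "Bound" before the Deployment below is applied.
kubectl get pvc gfs-gv1-pydir-pvc -o jsonpath='{.status.phase}'; echo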
#####################################################
##################################################### Use the GlusterFS PVC in a Deployment (run on one control-plane node only)
cat >pydemo.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pydemo
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: pydemo
  template:
    metadata:
      labels:
        app: pydemo
    spec:
      containers:
      - name: pydemo
        image: k.meilele.com:30050/py-web6.8:1
        ports:
        - containerPort: 8080
        volumeMounts:
        - name: gfspydir
          mountPath: /data/pydir
      volumes:
      - name: gfspydir
        persistentVolumeClaim:
          claimName: gfs-gv1-pydir-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: pydemo
  namespace: default
spec:
  selector:
    app: pydemo
  ports:
  - port: 80
    targetPort: 8080
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-pydemo
  namespace: default
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: vpn.testweb.com
    http:
      paths:
      - path: /
        backend:
          serviceName: pydemo
          servicePort: 80
EOF
kubectl apply -f pydemo.yaml
kubectl get pod,svc,ingress -o wide | grep pydemo
kubectl describe pod $(kubectl get pod | grep pydemo | awk '{print $1}')
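# End-to-end check (a sketch; the test file name is arbitrary): write through the host-side
# gluster mount, then read the same file back from /data/pydir inside one of the pods.
echo hello-gluster >/gv1/pydir/test.txt
kubectl exec $(kubectl get pod | grep pydemo | awk 'NR==1{print $1}') -- cat /data/pydir/test.txt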