从零开始搭建内网高可用k8s集群

笑着哭i 提交于 2019-12-14 04:01:27

前言

前段时间由于项目需求,需要搭建k8s集群,由于是首次接触,因此经验不足,只搭建了一个最基础的简单集群,现重新在内网搭建高可用集群+分布式存储,记录搭建过程

前期准备

服务器硬件准备

服务器 ip 系统
master1 172.16.140.100 centos7
master2 172.16.140.101 centos7
node1 172.16.140.102 centos7
node2 172.16.140.103 centos7
test——外网服务器,下软件包用 192.168.2.190 centos7

服务器软件准备

  • 保证master1,master2,node1,node2互通
  • 配置hosts,hostname
# 配置hostname
hostnamectl set-hostname master1
hostnamectl set-hostname master2
hostnamectl set-hostname node1
hostnamectl set-hostname node2
# 每台机器配置hosts
cat >/etc/hosts <<EOF 
172.16.140.100 master1
172.16.140.101 master2
172.16.140.102 node1
172.16.140.103 node2
EOF
  • 关闭防火墙,selinux,swap
# Stop firewalld now and keep it off across reboots so inter-node k8s/ceph traffic is not blocked
systemctl stop firewalld
systemctl disable firewalld
# Switch SELinux to permissive immediately, and disable it permanently in the config file
setenforce 0
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Turn swap off now (kubelet requires it) and comment out swap entries in fstab so it stays off after reboot
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
  • 配置内核参数,iptables
# 配置k8s iptables
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
# 配置ceph iptables
cat > /etc/sysctl.d/ceph.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system

第一步-搭建内网yum仓库

root@test:

  • 安装必要工具
yum -y install wget
yum -y install epel-release
yum -y install yum-utils
yum -y install createrepo
  • 修改本机源
# 配置国内epel源,备份原先源
mkdir /etc/yum.repos.d/bak && mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/ceph/repo/centos7_base.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/ceph/repo/epel-7.repo
# 修改国内ceph源
cat >/etc/yum.repos.d/ceph.repo <<EOF
[ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
EOF
# 修改国内k8s源
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# 配置国内docker源
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# 更新repo
yum clean all && yum makecache
  • 下载安装所需要的所有仓库
mkdir /root/local-repo
# 下载到本地
repotrack \
createrepo \
yum-utils \
ceph \
ceph-mgr \
ceph-mon \
ceph-mds \
ceph-osd \
ceph-fuse \
ceph-radosgw \
ceph-mgr-dashboard \
yum-plugin-priorities \
python-backports \
python-execnet \
python-ipaddress \
python-remoto \
python-setuptools \
ntp \
ntpdate \
ntp-doc \
docker-ce-18.06.1.ce-3.el7 \
docker-compose \
kubelet-1.16.3-0 \
kubeadm-1.16.3-0 \
kubectl-1.16.3-0 \
haproxy \
keepalived \
net-tools \
nmap-ncat \
-p /root/local-repo
# 生成仓库
createrepo -v /root/local-repo
# 打包
tar -zcf local-repo.tar.gz local-repo
  • 将local-repo.tar.gz拷贝至node2服务器/root文件夹下

root@node2:

  • 先搭建本地源
# 解压文件
tar -xvf local-repo.tar.gz
# 备份源
mkdir /etc/yum.repos.d/bak && mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak
# 写入本地源文件
vi /etc/yum.repos.d/local-repo.repo

local-repo.repo

[localrepo]

name=Local Repository
baseurl=file:///root/local-repo
gpgcheck=0
enabled=1
  • 安装搭建仓库需要的软件
yum -y install createrepo
  • 搭建本地仓库
cd /root
# 启动http服务
python -m SimpleHTTPServer 80

root@master1, master2, node1

  • 配置本地仓库路径
vi /etc/yum.repos.d/local-repo.repo
[localcephrepo]

name=Local Repository
baseurl=http://172.16.140.103/local-repo/
gpgcheck=0
enabled=1

第二步-搭建内网harbor服务器

root@test:

  • 安装docker,docker-compose并启动
yum -y install docker-ce-18.06.1.ce-3.el7 docker-compose
systemctl start docker
docker version
  • 下载harbor离线包
curl -LO https://github.com/goharbor/harbor/releases/download/v1.9.3/harbor-offline-installer-v1.9.3.tgz
  • 下载所需要image,打标并保存image
# 下载所需镜像
docker pull nginx:latest
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.16.3
docker pull kubernetesui/metrics-scraper:v1.0.1
docker pull quay.io/coreos/flannel:v0.11.0-amd64
docker pull quay.io/external_storage/rbd-provisioner:latest
docker pull quay.io/external_storage/cephfs-provisioner:latest
docker pull registry.aliyuncs.com/google_containers/pause:3.1
docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.16.3
docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.16.3
docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.16.3
docker pull registry.aliyuncs.com/google_containers/etcd:3.3.15-0
docker pull registry.aliyuncs.com/google_containers/coredns:1.6.2
docker pull quay.io/prometheus/node-exporter:v0.18.1
docker pull quay.io/coreos/kube-rbac-proxy:v0.4.1
docker pull quay.io/coreos/prometheus-operator:v0.34.0
docker pull grafana/grafana:6.4.3
docker pull quay.io/prometheus/alertmanager:v0.18.0
docker pull kubernetesui/dashboard:v2.0.0-beta8
docker pull quay.io/coreos/prometheus-config-reloader:v0.34.0
docker pull quay.io/coreos/kube-state-metrics:v1.8.0
docker pull quay.io/prometheus/prometheus:v2.11.0
docker pull quay.io/coreos/k8s-prometheus-adapter-amd64:v0.5.0
docker pull loveone/kubernetes-dashboard-amd64:v1.10.1
docker pull quay.io/coreos/configmap-reload:v0.0.1
docker pull registry.aliyuncs.com/google_containers/busybox:1.24
# 将镜像打标
docker tag nginx:latest 172.16.140.103/kubernetes-deploy/nginx:latest
docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.16.3 172.16.140.103/kubernetes-deploy/kube-proxy:v1.16.3
docker tag kubernetesui/metrics-scraper:v1.0.1 172.16.140.103/kubernetes-deploy/metrics-scraper:v1.0.1
docker tag quay.io/coreos/flannel:v0.11.0-amd64 172.16.140.103/kubernetes-deploy/flannel:v0.11.0-amd64
docker tag quay.io/external_storage/rbd-provisioner:latest 172.16.140.103/kubernetes-deploy/rbd-provisioner:latest
docker tag quay.io/external_storage/cephfs-provisioner:latest 172.16.140.103/kubernetes-deploy/cephfs-provisioner:latest
docker tag registry.aliyuncs.com/google_containers/pause:3.1 172.16.140.103/kubernetes-deploy/pause:3.1
docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.16.3 172.16.140.103/kubernetes-deploy/kube-apiserver:v1.16.3
docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.16.3 172.16.140.103/kubernetes-deploy/kube-controller-manager:v1.16.3
docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.16.3 172.16.140.103/kubernetes-deploy/kube-scheduler:v1.16.3
docker tag registry.aliyuncs.com/google_containers/etcd:3.3.15-0 172.16.140.103/kubernetes-deploy/etcd:3.3.15-0
docker tag registry.aliyuncs.com/google_containers/coredns:1.6.2 172.16.140.103/kubernetes-deploy/coredns:1.6.2
docker tag quay.io/prometheus/node-exporter:v0.18.1 172.16.140.103/kubernetes-deploy/node-exporter:v0.18.1
docker tag quay.io/coreos/kube-rbac-proxy:v0.4.1 172.16.140.103/kubernetes-deploy/kube-rbac-proxy:v0.4.1
docker tag quay.io/coreos/prometheus-operator:v0.34.0 172.16.140.103/kubernetes-deploy/prometheus-operator:v0.34.0
docker tag grafana/grafana:6.4.3 172.16.140.103/kubernetes-deploy/grafana:6.4.3
docker tag quay.io/prometheus/alertmanager:v0.18.0 172.16.140.103/kubernetes-deploy/alertmanager:v0.18.0
docker tag kubernetesui/dashboard:v2.0.0-beta8 172.16.140.103/kubernetes-deploy/dashboard:v2.0.0-beta8
docker tag quay.io/coreos/prometheus-config-reloader:v0.34.0 172.16.140.103/kubernetes-deploy/prometheus-config-reloader:v0.34.0
docker tag quay.io/coreos/kube-state-metrics:v1.8.0 172.16.140.103/kubernetes-deploy/kube-state-metrics:v1.8.0
docker tag quay.io/prometheus/prometheus:v2.11.0 172.16.140.103/kubernetes-deploy/prometheus:v2.11.0
docker tag quay.io/coreos/k8s-prometheus-adapter-amd64:v0.5.0 172.16.140.103/kubernetes-deploy/k8s-prometheus-adapter-amd64:v0.5.0
docker tag loveone/kubernetes-dashboard-amd64:v1.10.1 172.16.140.103/kubernetes-deploy/kubernetes-dashboard-amd64:v1.10.1
docker tag quay.io/coreos/configmap-reload:v0.0.1 172.16.140.103/kubernetes-deploy/configmap-reload:v0.0.1
docker tag registry.aliyuncs.com/google_containers/busybox:1.24 172.16.140.103/kubernetes-deploy/busybox:1.24
# 将镜像保存至本地(先创建保存目录)
mkdir -p /images
docker save 172.16.140.103/kubernetes-deploy/nginx:latest -o /images/nginx.tar
docker save 172.16.140.103/kubernetes-deploy/kube-proxy:v1.16.3 -o /images/kube-proxy.tar
docker save 172.16.140.103/kubernetes-deploy/metrics-scraper:v1.0.1 -o /images/metrics-scraper.tar
docker save 172.16.140.103/kubernetes-deploy/flannel:v0.11.0-amd64 -o /images/flannel.tar
docker save 172.16.140.103/kubernetes-deploy/rbd-provisioner:latest -o /images/rbd-provisioner.tar
docker save 172.16.140.103/kubernetes-deploy/cephfs-provisioner:latest -o /images/cephfs-provisioner.tar
docker save 172.16.140.103/kubernetes-deploy/pause:3.1 -o /images/pause.tar
docker save 172.16.140.103/kubernetes-deploy/kube-apiserver:v1.16.3 -o /images/kube-apiserver.tar
docker save 172.16.140.103/kubernetes-deploy/kube-controller-manager:v1.16.3 -o /images/kube-controller-manager.tar
docker save 172.16.140.103/kubernetes-deploy/kube-scheduler:v1.16.3 -o /images/kube-scheduler.tar
docker save 172.16.140.103/kubernetes-deploy/etcd:3.3.15-0 -o /images/etcd.tar
docker save 172.16.140.103/kubernetes-deploy/coredns:1.6.2 -o /images/coredns.tar
docker save 172.16.140.103/kubernetes-deploy/node-exporter:v0.18.1 -o /images/node-exporter.tar
docker save 172.16.140.103/kubernetes-deploy/kube-rbac-proxy:v0.4.1 -o /images/kube-rbac-proxy.tar
docker save 172.16.140.103/kubernetes-deploy/prometheus-operator:v0.34.0 -o /images/prometheus-operator.tar
docker save 172.16.140.103/kubernetes-deploy/grafana:6.4.3 -o /images/grafana.tar
docker save 172.16.140.103/kubernetes-deploy/alertmanager:v0.18.0 -o /images/alertmanager.tar
docker save 172.16.140.103/kubernetes-deploy/dashboard:v2.0.0-beta8 -o /images/dashboard.tar
docker save 172.16.140.103/kubernetes-deploy/prometheus-config-reloader:v0.34.0 -o /images/prometheus-config-reloader.tar
docker save 172.16.140.103/kubernetes-deploy/kube-state-metrics:v1.8.0 -o /images/kube-state-metrics.tar
docker save 172.16.140.103/kubernetes-deploy/prometheus:v2.11.0 -o /images/prometheus.tar
docker save 172.16.140.103/kubernetes-deploy/k8s-prometheus-adapter-amd64:v0.5.0 -o /images/k8s-prometheus-adapter-amd64.tar
docker save 172.16.140.103/kubernetes-deploy/kubernetes-dashboard-amd64:v1.10.1 -o /images/kubernetes-dashboard-amd64.tar
docker save 172.16.140.103/kubernetes-deploy/configmap-reload:v0.0.1 -o /images/configmap-reload.tar
docker save 172.16.140.103/kubernetes-deploy/busybox:1.24 -o /images/busybox.tar
  • 拷贝images文件夹至node2的/root文件夹下
  • 拷贝harbor-offline-installer-v1.9.3.tgz至node2的/root文件夹下

root@ALL:

  • 安装docker,docker-compose并设置相关参数
# docker-compose可以只在node2上安装
yum -y install docker-compose
yum -y install docker-ce-18.06.1.ce-3.el7
# 配置docker启动方式为systemd和为harbor服务器设置非https链接
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries": ["172.16.140.103"]
}
EOF
systemctl restart docker
docker info | grep Cgroup

root@node2:

  • 解压harbor离线包
tar -xvf /root/harbor-offline-installer-v1.9.3.tgz
cd harbor/
  • 修改harbor文件夹下的文件harbor.yml
sed -i 's/reg.mydomain.com/172.16.140.103/g' harbor.yml
  • 安装harbor
./install.sh
  • 登陆harbor

  • 浏览器打开http://172.16.140.103/harbor/
    在这里插入图片描述

  • 输入用户名admin,密码Harbor12345登陆

  • 新建项目kubernetes-deploy

  • 注:harbor服务器的启动和停止:
    docker-compose start/docker-compose stop

  • docker加载镜像

docker load -i nginx.tar
docker load -i kube-proxy.tar
docker load -i metrics-scraper.tar
docker load -i flannel.tar
docker load -i rbd-provisioner.tar
docker load -i cephfs-provisioner.tar
docker load -i pause.tar
docker load -i kube-apiserver.tar
docker load -i kube-controller-manager.tar
docker load -i kube-scheduler.tar
docker load -i etcd.tar
docker load -i coredns.tar
docker load -i node-exporter.tar
docker load -i kube-rbac-proxy.tar
docker load -i prometheus-operator.tar
docker load -i grafana.tar
docker load -i alertmanager.tar
docker load -i dashboard.tar
docker load -i prometheus-config-reloader.tar
docker load -i kube-state-metrics.tar
docker load -i prometheus.tar
docker load -i k8s-prometheus-adapter-amd64.tar
docker load -i kubernetes-dashboard-amd64.tar
docker load -i configmap-reload.tar
docker load -i busybox.tar
  • 上传至仓库
docker push 172.16.140.103/kubernetes-deploy/nginx:latest
docker push 172.16.140.103/kubernetes-deploy/kube-proxy:v1.16.3
docker push 172.16.140.103/kubernetes-deploy/metrics-scraper:v1.0.1
docker push 172.16.140.103/kubernetes-deploy/flannel:v0.11.0-amd64
docker push 172.16.140.103/kubernetes-deploy/rbd-provisioner:latest
docker push 172.16.140.103/kubernetes-deploy/cephfs-provisioner:latest
docker push 172.16.140.103/kubernetes-deploy/pause:3.1
docker push 172.16.140.103/kubernetes-deploy/kube-apiserver:v1.16.3
docker push 172.16.140.103/kubernetes-deploy/kube-controller-manager:v1.16.3
docker push 172.16.140.103/kubernetes-deploy/kube-scheduler:v1.16.3
docker push 172.16.140.103/kubernetes-deploy/etcd:3.3.15-0
docker push 172.16.140.103/kubernetes-deploy/coredns:1.6.2
docker push 172.16.140.103/kubernetes-deploy/node-exporter:v0.18.1
docker push 172.16.140.103/kubernetes-deploy/kube-rbac-proxy:v0.4.1
docker push 172.16.140.103/kubernetes-deploy/prometheus-operator:v0.34.0
docker push 172.16.140.103/kubernetes-deploy/grafana:6.4.3
docker push 172.16.140.103/kubernetes-deploy/alertmanager:v0.18.0
docker push 172.16.140.103/kubernetes-deploy/dashboard:v2.0.0-beta8
docker push 172.16.140.103/kubernetes-deploy/prometheus-config-reloader:v0.34.0
docker push 172.16.140.103/kubernetes-deploy/kube-state-metrics:v1.8.0
docker push 172.16.140.103/kubernetes-deploy/prometheus:v2.11.0
docker push 172.16.140.103/kubernetes-deploy/k8s-prometheus-adapter-amd64:v0.5.0
docker push 172.16.140.103/kubernetes-deploy/kubernetes-dashboard-amd64:v1.10.1
docker push 172.16.140.103/kubernetes-deploy/configmap-reload:v0.0.1
docker push 172.16.140.103/kubernetes-deploy/busybox:1.24
  • 在网页上查看仓库是否已经保存镜像
    在这里插入图片描述

第三步-在master1,master2节点上搭建haproxy+keepalived框架

root@master1,master2:

  • 安装相关插件
yum install -y haproxy keepalived net-tools nmap-ncat
  • 配置haproxy
sed -i '$a\
#---------------------------------------------------------------------\
# kube-api-server-listen\
#---------------------------------------------------------------------\
listen kube-api-lb\
	bind 0.0.0.0:10443\
	mode tcp\
	balance roundrobin\
	server master1 172.16.140.100:6443 weight 1 maxconn 10000 check inter 10s\
	server master2 172.16.140.101:6443 weight 1 maxconn 10000 check inter 10s\
\
#---------------------------------------------------------------------\
# kube-api-haproxy-stats\
#---------------------------------------------------------------------\
listen admin_stats\
	bind 0.0.0.0:8099\
	mode http\
	option httplog\
	maxconn 10\
	stats refresh 30s\
	stats uri /stats' /etc/haproxy/haproxy.cfg
  • 配置keepalived
mkdir -p /etc/keepalived/scripts/
# Health-check script run periodically by keepalived (vrrp_script chk_haproxy):
# if haproxy is down, try to restart it once; if it still is not running,
# stop keepalived so the VIP fails over to the backup node.
# Heredoc delimiter is escaped (\EOF) so the script is written literally.
cat > /etc/keepalived/scripts/haproxy_check.sh << \EOF
#!/bin/bash
# Use $(...) instead of backticks and quote the substitution so the test is robust
if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]; then
	systemctl start haproxy
	sleep 3
	if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]; then
		# haproxy cannot be recovered locally; yield the VIP to the peer node
		systemctl stop keepalived
	fi
fi
EOF
cat > /etc/keepalived/scripts/notifi_master.sh << \EOF
#!/bin/bash
VIP=172.xx.xx.xx
GATEWAY=172.xx.xx.xx
/sbin/arping -I eth0 -c 5 -s $VIP $GATEWAY &>/dev/null
EOF
chmod +x /etc/keepalived/scripts/haproxy_check.sh /etc/keepalived/scripts/notifi_master.sh
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.backup
  • 这里把master1节点作为主节点,master2节点作为备用节点
    在master1节点上配置主keepalived.conf
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
   router_id Haproxy-Master
   script_user root
   enable_script_security
   vrrp_skip_check_adv_addr
   vrrp_iptables
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   # vrrp_strict
}

vrrp_script chk_haproxy
{
    script "/etc/keepalived/scripts/haproxy_check.sh"
    interval 5
    fall 2
}

vrrp_instance haproxy {
    state MASTER
    interface ens32
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS      
        auth_pass 5e97s45a2  
    }
    unicast_src_ip 172.16.140.100
    unicast_peer {
      172.16.140.101
      172.16.140.102
      172.16.140.103
    }
	track_script {
        chk_haproxy
    }
    virtual_ipaddress {
        172.16.140.150
    }
	notify_master "/etc/keepalived/scripts/notifi_master.sh"
}

EOF

在master2节点上配置备keepalived.conf

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
   router_id Haproxy-Master
   script_user root
   enable_script_security
   vrrp_skip_check_adv_addr
   vrrp_iptables
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_script chk_haproxy
{
    script "/etc/keepalived/scripts/haproxy_check.sh"
    interval 5
    fall 2
}

vrrp_instance haproxy {
    state BACKUP
    interface ens32
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS      
        auth_pass 5e97s45a2  
    }
    unicast_src_ip 172.16.140.101
    unicast_peer {
      172.16.140.100
      172.16.140.102
      172.16.140.103
    }
    virtual_ipaddress {
        172.16.140.150
    }
	notify_master "/etc/keepalived/scripts/notifi_master.sh"
}

EOF
  • 启动keepalived
systemctl stop NetworkManager
systemctl start keepalived
systemctl enable keepalived
netstat -ntplu|grep 10443

第四步-部署k8s集群

root@master1:

  • 使用kubeadm部署master1
    pod-network-cidr为flanneld网络默认地址
kubeadm init --kubernetes-version=1.16.3 \
--apiserver-advertise-address=172.16.140.100 \
--image-repository 172.16.140.103/kubernetes-deploy \
--service-cidr=10.1.0.0/16 \
--control-plane-endpoint "172.16.140.150:10443" \
--upload-certs \
--pod-network-cidr=10.244.0.0/16
  • 记录加入master节点的token和加入node节点的token
  • 配置kubectl工具
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes
kubectl get cs
  • 部署flanneld网络
kubectl apply -f flanneld.yaml
# 检测集群状态
kubectl get nodes

root@master2:

  • 使用之前记录的加入命令加入k8s集群(control-plane)

root@node1,node2:

  • 使用之前记录的加入命令加入k8s集群
  • 在master1或者master2节点上查看集群状态
kubectl get pod,svc,cs,node -Ao wide

第五步-部署dashboard

root@master1:

  • 生成dashboard.yaml
cat>kube-dashboard.yaml<<\EOF
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
	  nodePort: 30001
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: 172.16.140.103/kubernetes-deploy/dashboard:v2.0.0-beta8
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: 172.16.140.103/kubernetes-deploy/metrics-scraper:v1.0.1
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
EOF
  • 替换过期证书
#生成证书
openssl genrsa -out dashboard.key 2048
openssl req -new -out dashboard.csr -key dashboard.key -subj '/CN=172.16.140.150'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
#删除原有的证书secret
kubectl delete secret kubernetes-dashboard-certs -n kubernetes-dashboard
#创建新的证书secret
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
#查看pod
kubectl get pod -n kubernetes-dashboard
#重启pod
kubectl delete pod <pod name> -n kubernetes-dashboard
  • 创建账号访问
kubectl create serviceaccount  dashboard-admin -n kubernetes-dashboard
kubectl create clusterrolebinding  dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin
kubectl describe secrets -n kubernetes-dashboard $(kubectl -n kubernetes-dashboard get secret | awk '/dashboard-admin/{print $1}')
  • 登陆dashboard
    输入https://172.16.140.150:30001/访问dashboard
    在这里插入图片描述

第六步-搭建ceph

root@ALL:

  • 配置myceph用户
useradd -d /home/myceph -m myceph
passwd  myceph
  • 为myceph添加sudo权限
sed -i "/## Allow root to run any commands anywhere /a\\\myceph ALL = (ALL)  ALL" /etc/sudoers
echo "myceph ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/myceph
sudo chmod 0440 /etc/sudoers.d/myceph
  • 在所有节点安装ceph组件
yum install -y ceph ceph-radosgw yum-plugin-priorities
  • 安装ntp服务
yum install -y ntp ntpdate ntp-doc
  • 配置ntp内网同步
    这里以node2作为ntp服务器
    在node2上修改ntp.conf
vi /etc/ntp.conf

ntp.conf

# For more information about this file, see the man pages
# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).

driftfile /var/lib/ntp/drift

# Permit time synchronization with our time source, but do not
# permit the source to query or modify the service on this system.
restrict default nomodify notrap nopeer noquery

# Permit all access over the loopback interface.  This could
# be tightened as well, but to do so would effect some of
# the administrative functions.
restrict 127.0.0.1 
restrict ::1

# Hosts on local network are less restricted.
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap

# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
# server 0.centos.pool.ntp.org iburst
# server 1.centos.pool.ntp.org iburst
# server 2.centos.pool.ntp.org iburst
# server 3.centos.pool.ntp.org iburst
server 127.127.1.0
fudge 127.127.1.0 stratum 10
restrict 172.16.140.0 mask 255.255.255.0 #允许客户端网段的所有主机向本机请求时间同步

#broadcast 192.168.1.255 autokey	# broadcast server
#broadcastclient			# broadcast client
#broadcast 224.0.1.1 autokey		# multicast server
#multicastclient 224.0.1.1		# multicast client
#manycastserver 239.255.254.254		# manycast server
#manycastclient 239.255.254.254 autokey # manycast client

# Enable public key cryptography.
#crypto

includefile /etc/ntp/crypto/pw

# Key file containing the keys and key identifiers used when operating
# with symmetric key cryptography. 
keys /etc/ntp/keys

# Specify the key identifiers which are trusted.
#trustedkey 4 8 42

# Specify the key identifier to use with the ntpdc utility.
#requestkey 8

# Specify the key identifier to use with the ntpq utility.
#controlkey 8

# Enable writing of statistics records.
#statistics clockstats cryptostats loopstats peerstats

# Disable the monitoring facility to prevent amplification attacks using ntpdc
# monlist command when default restrict does not include the noquery flag. See
# CVE-2013-5211 for more details.
# Note: Monitoring will not be disabled with the limited restriction flag.
disable monitor

主要在serverpool下方新增ntp本地server并注释原先互联网ntp-server
server 127.127.1.0
fudge 127.127.1.0 stratum 10
restrict 172.16.140.0 mask 255.255.255.0

  • 在root@node1,master1,master2上设置ntp同步
    修改ntp.conf
# For more information about this file, see the man pages
# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).

driftfile /var/lib/ntp/drift

# Permit time synchronization with our time source, but do not
# permit the source to query or modify the service on this system.
restrict default nomodify notrap nopeer noquery

# Permit all access over the loopback interface.  This could
# be tightened as well, but to do so would effect some of
# the administrative functions.
restrict 127.0.0.1 
restrict ::1

# Hosts on local network are less restricted.
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap

# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
# server 0.centos.pool.ntp.org iburst
# server 1.centos.pool.ntp.org iburst
# server 2.centos.pool.ntp.org iburst
# server 3.centos.pool.ntp.org iburst
server 172.16.140.103    #指名上层NTP服务器
restrict 172.16.140.103       #放行

#broadcast 192.168.1.255 autokey	# broadcast server
#broadcastclient			# broadcast client
#broadcast 224.0.1.1 autokey		# multicast server
#multicastclient 224.0.1.1		# multicast client
#manycastserver 239.255.254.254		# manycast server
#manycastclient 239.255.254.254 autokey # manycast client

# Enable public key cryptography.
#crypto

includefile /etc/ntp/crypto/pw

# Key file containing the keys and key identifiers used when operating
# with symmetric key cryptography. 
keys /etc/ntp/keys

# Specify the key identifiers which are trusted.
#trustedkey 4 8 42

# Specify the key identifier to use with the ntpdc utility.
#requestkey 8

# Specify the key identifier to use with the ntpq utility.
#controlkey 8

# Enable writing of statistics records.
#statistics clockstats cryptostats loopstats peerstats

# Disable the monitoring facility to prevent amplification attacks using ntpdc
# monlist command when default restrict does not include the noquery flag. See
# CVE-2013-5211 for more details.
# Note: Monitoring will not be disabled with the limited restriction flag.
disable monitor

设置serverpool上层服务器
server 172.16.140.103
restrict 172.16.140.103

  • 查看ntp服务是否已经启动并已经同步
# 查看ntpstat
ntpstat
# 查看ntpd
ntpq -p

myceph@node2:

  • 生成rsa-key实现免密登陆
# 生成key
ssh-keygen  -t  rsa
# 拷贝到各个节点
ssh-copy-id -i .ssh/id_rsa.pub myceph@node1
ssh-copy-id -i .ssh/id_rsa.pub myceph@node2
ssh-copy-id -i .ssh/id_rsa.pub myceph@master1
ssh-copy-id -i .ssh/id_rsa.pub myceph@master2
  • 配置ssh-config
cat > ~/.ssh/config <<EOF
Host ceph-node1
Hostname master1
User myceph

Host ceph-node2
Hostname master2
User myceph

Host ceph-node3
Hostname node1
User myceph

Host ceph-node4
Hostname node2
User myceph
EOF
# 配置该文件权限
sudo chmod 600 ~/.ssh/config
  • 安装ceph-deploy
sudo yum install -y python-backports python-execnet python-ipaddress python-remoto python-setuptools
sudo yum install ceph-deploy -y
  • 搭建ceph集群
mkdir my-cluster  
cd my-cluster
# 部署mon
ceph-deploy new node2 node1
  • 修改ceph.conf在最后一行添加public network配置
vi ceph.conf

ceph.conf

[global]
fsid = f9cf1256-b100-4620-88df-2f9481fc24bd
mon_initial_members = node2, node1
mon_host = 172.16.140.103,172.16.140.102
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = 172.16.140.0/24
  • 初始化mon,mgr,osd,mds
# 部署mon
ceph-deploy mon create-initial
# 赋予节点admin权限
ceph-deploy admin node1 node2 master1 master2
# 增加mgr
ceph-deploy mgr create node2 node1 master2 master1
# 增加osd
ceph-deploy osd create --data /dev/sdb node1
ceph-deploy osd create --data /dev/sdb node2
ceph-deploy osd create --data /dev/sdb master1
ceph-deploy osd create --data /dev/sdb master2
# 增加mds
ceph-deploy mds create node2 node1 master2 master1

root@node2:

  • 开启dashboard
yum install -y ceph-mgr-dashboard
ceph mgr module enable dashboard
# 生成证书
openssl req -new -nodes -x509 \
  -subj "/O=IT/CN=ceph-mgr-dashboard" -days 3650 \
  -keyout dashboard.key -out dashboard.crt -extensions v3_ca
ceph dashboard set-ssl-certificate -i dashboard.crt
ceph dashboard set-ssl-certificate-key -i dashboard.key
# 设置端口,ip
ceph config set mgr mgr/dashboard/server_addr 172.16.140.103
ceph config set mgr mgr/dashboard/server_port 7000
ceph config set mgr mgr/dashboard/ssl_server_port 8443
# 设置用户
ceph dashboard ac-user-create admin admin administrator
# 查看地址
ceph mgr services
  • 登陆ceph-dashboard
    输入https://172.16.140.103:8443/ 用户admin 密码admin登陆
    在这里插入图片描述
    未完待续
易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!