k8s High-Availability Deployment on Virtual Machines


1. Modify the IPs

155 (master1): deploy keepalived and haproxy, initialize the cluster, install docker

156 (master2): deploy keepalived and haproxy, join master2 to the cluster, install docker and the network plugin

157 (node1): join the cluster, install docker and the network plugin

158: virtual IP (VIP), not bound to a physical machine

[root@localhost ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
BOOTPROTO="static"
IPADDR="192.168.92.157"
NETMASK="255.255.255.0"
GATEWAY="192.168.92.2"
DNS="192.168.92.2"
NM_CONTROLLED="no"
DEFROUTE="yes"
PEERDNS="yes"
PEERROUTES="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_PEERDNS="yes"
IPV6_PEERROUTES="yes"
IPV6_FAILURE_FATAL="no"
NAME="ens33"
UUID="aa4b6db1-a838-464d-98c6-18009dfcd8e7"
DEVICE="ens33"
ONBOOT="yes"

#Restart the network service (NetworkManager is disabled here so the classic network service manages the interface)
[root@localhost network-scripts]# systemctl stop NetworkManager.service
[root@localhost network-scripts]# systemctl disable NetworkManager.service
[root@localhost network-scripts]# systemctl restart network.service
                                # (on systems that keep NetworkManager: systemctl restart NetworkManager.service)

#Update the local DNS
[root@localhost ~]# vim /etc/resolv.conf 
nameserver 192.168.92.2     #must match the DNS entry in the ens33 config above
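
After the restart, a quick sanity check that the static address, gateway and DNS actually took effect is worthwhile (plain iproute2/ping commands; the addresses match this tutorial's layout):

# static IP applied to ens33?
ip addr show ens33
# default route points at 192.168.92.2?
ip route | grep default
# DNS resolution works?
ping -c 3 mirrors.aliyun.com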

2. Basic setup

#Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
#Disable SELinux
setenforce 0  # temporary, takes effect immediately
sed -i 's/enforcing/disabled/' /etc/selinux/config  # permanent, after reboot
#Disable swap
swapoff -a # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent
#Host name mappings: run on the master nodes; the worker node does not need this
cat >> /etc/hosts << EOF
192.168.92.158 master.k8s.io k8s-vip
192.168.92.155 master1
192.168.92.156 master2
192.168.92.157 node01.k8s.io node1
EOF
#Pass bridged IPv4 traffic to the iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# apply
sysctl --system 
#Time synchronization
yum install ntpdate -y
ntpdate time.windows.com
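
Before moving on, a minimal verification that the settings above are in effect (standard CentOS 7 commands; run on each node):

getenforce                                  # Permissive or Disabled expected
free -h | grep -i swap                      # swap should show 0B
sysctl net.bridge.bridge-nf-call-iptables   # should print 1
getent hosts master1 master2 k8s-vip        # hosts entries resolve (masters only)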

3. Install keepalived on the master nodes

yum install -y conntrack-tools libseccomp libtool-ltdl keepalived

master1 configuration

cat > /etc/keepalived/keepalived.conf <<EOF 
! Configuration File for keepalived

global_defs {
   router_id k8s
}

vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state MASTER 
    interface ens33 
    virtual_router_id 51
    priority 250
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        192.168.92.158 # the virtual IP
    }
    track_script {
        check_haproxy
    }

}
EOF

master2 configuration

cat > /etc/keepalived/keepalived.conf <<EOF 
! Configuration File for keepalived

global_defs {
   router_id k8s
}

vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP 
    interface ens33 
    virtual_router_id 51
    priority 200
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        192.168.92.158
    }
    track_script {
        check_haproxy
    }

}
EOF
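
The vrrp_script block above is what ties the VIP to haproxy's health: killall -0 haproxy sends signal 0, which succeeds only while a haproxy process exists, and after 10 consecutive failures (fall 10) keepalived subtracts the weight of 2 from the node's priority. The probe can be run by hand to see exactly what keepalived sees (killall comes from the psmisc package):

# exit status 0 = haproxy is running, non-zero = not running
killall -0 haproxy; echo $?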

4. Start and verify

Run on both master nodes

# Start keepalived
systemctl start keepalived.service
# Enable at boot
systemctl enable keepalived.service
# Check status
systemctl status keepalived.service

After starting, check the NIC information on the master that holds the VIP

[root@localhost ~]# ip a s ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:c8:03:45 brd ff:ff:ff:ff:ff:ff
    inet 192.168.92.156/24 brd 192.168.92.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 192.168.92.158/32 scope global ens33 # the VIP; it moves to the other master if this node goes down
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fec8:345/64 scope link 
       valid_lft forever preferred_lft forever
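
To confirm that failover actually works, stop keepalived on the node currently holding the VIP and watch 192.168.92.158 move to the other master (a manual test; restart keepalived afterwards):

# on the master holding the VIP
systemctl stop keepalived
# on the other master, the VIP should appear within a few seconds
ip a s ens33 | grep 192.168.92.158
# restore
systemctl start keepalived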

5. Deploy haproxy (on both masters)

yum install -y haproxy
mkdir -p /etc/haproxy/
touch /etc/haproxy/haproxy.cfg
#echo 'net.ipv4.ip_nonlocal_bind = 1'>>/etc/sysctl.conf
#The configuration is identical on both master nodes. It declares the two master apiservers as backends and binds haproxy to port 16443, so port 16443 is the entry point of the cluster
cat > /etc/haproxy/haproxy.cfg << EOF
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon 

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------  
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
    log                     127.0.0.1 local7
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxies to the backends
#--------------------------------------------------------------------- 
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:16443
    option               tcplog
    default_backend      kubernetes-apiserver    
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server      master01.k8s.io   192.168.92.155:6443 check
    server      master02.k8s.io   192.168.92.156:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind                 *:1080
    stats auth           admin:awesomePassword
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats
EOF
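
haproxy can validate the configuration file before the service is started, which catches syntax errors early:

haproxy -c -f /etc/haproxy/haproxy.cfg
# a valid configuration prints a confirmation and exits 0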

Start

# Enable at boot
systemctl enable haproxy
# Start haproxy
systemctl start haproxy
# Check status
systemctl status haproxy
# If it fails, inspect the errors
journalctl -xe

Check the port

yum install net-tools -y
netstat -lntup|grep haproxy
#haproxy should be listening on 16443
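
The stats page configured above gives another quick check that haproxy is up and sees both apiserver backends (credentials and URI come from the haproxy.cfg written earlier; both backends will show DOWN until the apiservers exist):

curl -u admin:awesomePassword 'http://127.0.0.1:1080/admin?stats'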

6. Install Docker/kubeadm/kubelet on all nodes

yum -y install wget
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce-18.06.1.ce-3.el7
systemctl enable docker && systemctl start docker
docker --version
# expected output: Docker version 18.06.1-ce, build e68fc7a

cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
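
daemon.json is only read when the docker daemon starts, so restart docker for the registry mirror to take effect:

systemctl restart docker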

7. Add the Aliyun YUM repository

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

8. Install kubeadm, kubelet and kubectl

#Versions change frequently, so pin the version explicitly:
yum install -y kubelet-1.16.3 kubeadm-1.16.3 kubectl-1.16.3
systemctl enable kubelet

9. Deploy the Kubernetes master

#Operate on the master that has the VIP, i.e. the node where ip a s ens33 shows the 158 virtual IP
mkdir /usr/local/kubernetes/manifests -p
cd /usr/local/kubernetes/manifests/
vi kubeadm-config.yaml

apiServer:
  certSANs:
    - master1
    - master2
    - master.k8s.io
    - 192.168.92.155
    - 192.168.92.156
    - 192.168.92.158
    - 127.0.0.1
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "master.k8s.io:16443"
controllerManager: {}
dns: 
  type: CoreDNS
etcd:
  local:    
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.16.3
networking: 
  dnsDomain: cluster.local  
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.1.0.0/16
scheduler: {}
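
Incidentally, this kubeadm-config.yaml is consumed through kubeadm's --config flag. Initializing that way, instead of with the individual flags shown below, is what actually puts the VIP-based controlPlaneEndpoint (master.k8s.io:16443 through haproxy) into the cluster, so it is worth considering for a HA setup:

kubeadm init --config kubeadm-config.yaml --v=6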

Run on master2 (the virtual-IP node)

#If a previous attempt failed, reset before retrying:
kubeadm reset

kubeadm init \
  --apiserver-advertise-address=192.168.92.155 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.16.3 \
  --service-cidr=10.1.0.0/16 \
  --pod-network-cidr=10.244.0.0/16 \
  --v=6
#join command printed in the init log:
kubeadm join 192.168.92.155:6443 --token x8dokg.k4xh3hcd5j3jdbhi \
    --discovery-token-ca-cert-hash sha256:1addae43a0aa276942415137348079e89740ab185b4ba7a403f09a31c8653186 
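
The bootstrap token in this log expires after 24 hours by default; if it has expired by the time a node joins, a fresh join command can be printed on a master at any time:

kubeadm token create --print-join-command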

Configure the environment variables as prompted so the kubectl tool can be used:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#Check status
kubectl get nodes
kubectl get pods -n kube-system

10. Install the cluster network

Fetch the flannel YAML from its official address and run the following on master2 (the VIP node)

mkdir flannel
cd flannel
wget -c https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
#Install the flannel network
kubectl apply -f kube-flannel.yml
#Check
[root@localhost flannel]# kubectl get pods -n kube-system
NAME                                            READY   STATUS    RESTARTS   AGE
coredns-58cc8c89f4-nqh2h                        0/1     Pending   0          17m
coredns-58cc8c89f4-vdsll                        0/1     Pending   0          17m
etcd-localhost.localdomain                      1/1     Running   1          16m
kube-apiserver-localhost.localdomain            1/1     Running   1          17m
kube-controller-manager-localhost.localdomain   1/1     Running   1          16m
kube-flannel-ds-amd64-6296f                     1/1     Running   0          6m35s
kube-proxy-5nptc                                1/1     Running   1          17m
kube-scheduler-localhost.localdomain            1/1     Running   1          17m

11. Join master2 to the cluster

#Make sure controlPlaneEndpoint is set in the kubeadm-config ConfigMap (the flag-based init above did not set it), then save:
[root@localhost manifests]# kubectl -n kube-system edit cm kubeadm-config
apiVersion: v1
data:
  ClusterConfiguration: |
    apiServer:
      extraArgs:
        authorization-mode: Node,RBAC
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: "192.168.92.155:6443" 
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: v1.16.3
    networking:
      dnsDomain: cluster.local
      podSubnet: 10.244.0.0/16
      serviceSubnet: 10.1.0.0/16
    scheduler: {}
  ClusterStatus: |
    apiEndpoints:
      localhost.localdomain:
        advertiseAddress: 192.168.92.155
#Copy the keys and related certificate files from master1 to master2
ssh root@192.168.92.156 mkdir -p /etc/kubernetes/pki/etcd
scp /etc/kubernetes/admin.conf root@192.168.92.156:/etc/kubernetes
scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@192.168.92.156:/etc/kubernetes/pki
scp /etc/kubernetes/pki/etcd/ca.* root@192.168.92.156:/etc/kubernetes/pki/etcd
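
As an alternative to copying certificates by hand (a sketch; kubeadm has supported this since 1.15, so it applies to 1.16.3 as well), kubeadm can upload them as a temporary secret and hand joining masters a decryption key:

# on master1: upload the control-plane certificates and print the certificate key
kubeadm init phase upload-certs --upload-certs
# on master2: add --certificate-key <key-from-above> to the kubeadm join ... --control-plane
# command instead of running the scp steps shown above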

Run the join command printed by kubeadm init on master1, adding the --control-plane flag to join this node as a control-plane (master) node

kubeadm join 192.168.92.155:6443 \
  --token 4j3gye.4m4f396zy6ypia31 \
  --discovery-token-ca-cert-hash sha256:5d4d9d1eca578f49be8aa684aa569bc5f7017c1f2da892526d12148af9283756 \
  --control-plane --v=5

#Check
kubectl get node
kubectl get pods --all-namespaces

12. Join the Kubernetes worker node

#Run on node1
#To add a new node to the cluster, execute the kubeadm join command from the kubeadm init output:
kubeadm join 192.168.92.155:6443 \
--token 4xyurf.vk5kh6rn5og0pt29 \
--discovery-token-ca-cert-hash sha256:f3ee28e27b740e70f2cb5a988b02fc71b8d81689c0cab4a80d46e678feead38e \
--v=5

#Check
kubectl get node
kubectl get pods --all-namespaces

13. Test the Kubernetes cluster

#Create a pod in the Kubernetes cluster and verify it runs:
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
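
kubectl expose with --type=NodePort picks a port in the 30000-32767 range; it shows up in the svc output above, or can be read directly with a jsonpath query (assuming the service is named nginx, as created here):

kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}'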

Access http://192.168.92.157:<NodePort>, using the NodePort shown in the service output above (6443 is the apiserver port, not the nginx service port)
