Kubernetes 1.15.0 High Availability with keepalived + HAProxy

1. Master High Availability

To remove the single points of failure on the Master and on etcd, the control plane has to be made highly available while the etcd data stays consistent across all members.

[root@localhost ~]# hostnamectl set-hostname master01
[root@localhost ~]# hostnamectl set-hostname master02
[root@localhost ~]# hostnamectl set-hostname master03
# Generate an SSH key pair on master01
[root@localhost ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:6H0xzKWAv63KofmN8wNlt93tO/Asbl6WDICBCYhvcds root@master01
The key's randomart image is:
+---[RSA 2048]----+
|  . ... o.       |
| . o . +  o      |
|  . o + .. ..    |
|   o . Eo+.o.    |
|  .   .oS.*o o . |
|     ... o.o..+ o|
|      o.o o   +* |
|     +.+.o   oo+.|
|    o.=++.  +o..o|
+----[SHA256]-----+
[root@localhost ~]# ssh-copy-id root@172.16.216.229
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host '172.16.216.229 (172.16.216.229)' can't be established.
ECDSA key fingerprint is SHA256:RSjZGjpxNF+3FfNVScnO7si+ixmb5cvjEQChMZANJl8.
ECDSA key fingerprint is MD5:91:c5:3d:0a:22:4a:51:9b:b6:57:04:c8:f4:10:df:fd.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@172.16.216.229's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@172.16.216.229'"
and check to make sure that only the key(s) you wanted were added.

[root@localhost ~]# ssh-copy-id root@172.16.216.230
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host '172.16.216.230 (172.16.216.230)' can't be established.
ECDSA key fingerprint is SHA256:RSjZGjpxNF+3FfNVScnO7si+ixmb5cvjEQChMZANJl8.
ECDSA key fingerprint is MD5:91:c5:3d:0a:22:4a:51:9b:b6:57:04:c8:f4:10:df:fd.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@172.16.216.230's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@172.16.216.230'"
and check to make sure that only the key(s) you wanted were added.
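
The same key distribution can be written as a short loop; the addresses are the ones used for master02 and master03 throughout this walkthrough.

# Optional loop form of the ssh-copy-id steps above (run on master01)
for ip in 172.16.216.229 172.16.216.230; do
    ssh-copy-id root@$ip
done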

# Add the host entries to /etc/hosts on all three masters
[root@localhost ~]# vim /etc/hosts
172.16.216.228 master01 master01.linuxplus.com
172.16.216.229 master02 master02.linuxplus.com
172.16.216.230 master03 master03.linuxplus.com
172.16.216.234 cluster-node1
172.16.216.235 cluster-node2

[root@localhost ~]# vim /etc/hosts
172.16.216.228 master01 master01.linuxplus.com
172.16.216.229 master02 master02.linuxplus.com
172.16.216.230 master03 master03.linuxplus.com
172.16.216.234 cluster-node1
172.16.216.235 cluster-node2

[root@localhost ~]# vim /etc/hosts
172.16.216.228 master01 master01.linuxplus.com
172.16.216.229 master02 master02.linuxplus.com
172.16.216.230 master03 master03.linuxplus.com
172.16.216.234 cluster-node1
172.16.216.235 cluster-node2
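
Rather than editing /etc/hosts by hand on each master, the file can be maintained on master01 and pushed out over the SSH keys that were just installed; a minimal sketch:

# Push the hosts file from master01 to the other two masters
for ip in 172.16.216.229 172.16.216.230; do
    scp /etc/hosts root@$ip:/etc/hosts
done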

# Reboot each of the three machines
[root@localhost ~]# reboot

Deploying keepalived

# Enable IP forwarding on all three servers
[root@master01 ~]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_forward = 1
> EOF
[root@master01 ~]# sysctl -p
net.ipv4.ip_forward = 1
[root@master02 ~]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_forward = 1
> EOF
[root@master02 ~]# sysctl -p
net.ipv4.ip_forward = 1
[root@master03 ~]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_forward = 1
> EOF
[root@master03 ~]# sysctl -p
net.ipv4.ip_forward = 1
# Install keepalived on all three servers
[root@master01 ~]# yum install -y keepalived
[root@master02 ~]# yum install -y keepalived
[root@master03 ~]# yum install -y keepalived
# Configure keepalived
[root@master01 ~]# cd /etc/keepalived/
[root@master01 keepalived]# vim keepalived.conf
! Configuration File for keepalived

global_defs {
        router_id LVS_DEVEL
}

vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
}

vrrp_instance VI_1 {
        state MASTER
        interface ens33
        virtual_router_id 51
        priority 250
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 35f18af7190d51c9f7f78f37300a0cbd
        }
        virtual_ipaddress {
            172.16.216.30/24 dev ens33
        }
        track_script {
           check_haproxy
        }
}
[root@master02 ~]# cd /etc/keepalived/
[root@master02 keepalived]# vim keepalived.conf
! Configuration File for keepalived

global_defs {
        router_id LVS_DEVEL
}

vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
}

vrrp_instance VI_1 {
        state BACKUP
        interface ens33
        virtual_router_id 51
        priority 249
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 35f18af7190d51c9f7f78f37300a0cbd
        }
        virtual_ipaddress {
            172.16.216.30/24 dev ens33
        }
        track_script {
           check_haproxy
        }
}
[root@master03 ~]# cd /etc/keepalived/
[root@master03 keepalived]# vim keepalived.conf
! Configuration File for keepalived

global_defs {
        router_id LVS_DEVEL
}

vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
}

vrrp_instance VI_1 {
        state BACKUP
        interface ens33
        virtual_router_id 51
        priority 248
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 35f18af7190d51c9f7f78f37300a0cbd
        }
        virtual_ipaddress {
            172.16.216.30/24 dev ens33
        }
        track_script {
           check_haproxy
        }
}
# Start the service and check its status on each node
//Master01
[root@master01 keepalived]# systemctl enable keepalived.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@master01 keepalived]# systemctl start keepalived.service 
[root@master01 keepalived]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 21:53:50 CST; 5s ago
  Process: 45326 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 45327 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─45327 /usr/sbin/keepalived -D
           ├─45328 /usr/sbin/keepalived -D
           └─45329 /usr/sbin/keepalived -D

Jun 30 21:53:50 master01 Keepalived_vrrp[45329]: VRRP sockpool: [ifindex(2), proto(112), unicast(0), fd(10,11)]
Jun 30 21:53:51 master01 Keepalived_vrrp[45329]: VRRP_Instance(VI_1) Transition to MASTER STATE
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: VRRP_Instance(VI_1) Entering MASTER STATE
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: VRRP_Instance(VI_1) setting protocol VIPs.
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: Sending gratuitous ARP on ens33 for 172.16.216.30
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: VRRP_Instance(VI_1) Sending/queueing gratuitous ARPs on ens33 for 172.16.216.30
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: Sending gratuitous ARP on ens33 for 172.16.216.30
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: Sending gratuitous ARP on ens33 for 172.16.216.30
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: Sending gratuitous ARP on ens33 for 172.16.216.30
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: Sending gratuitous ARP on ens33 for 172.16.216.30
[root@master01 keepalived]# ip address show ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:37:f5:ef brd ff:ff:ff:ff:ff:ff
    inet 172.16.216.228/24 brd 172.16.216.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 172.16.216.30/24 scope global secondary ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe37:f5ef/64 scope link 
       valid_lft forever preferred_lft forever
//Master02
[root@master02 keepalived]# systemctl enable keepalived.service
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@master02 keepalived]# systemctl start keepalived.service
[root@master02 keepalived]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 21:54:09 CST; 3s ago
  Process: 45054 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 45055 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─45055 /usr/sbin/keepalived -D
           ├─45056 /usr/sbin/keepalived -D
           └─45057 /usr/sbin/keepalived -D

Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Registering gratuitous ARP shared channel
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Opening file '/etc/keepalived/keepalived.conf'.
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: WARNING - default user 'keepalived_script' for script execution does not exist - please create.
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Truncating auth_pass to 8 characters
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Cannot find script killall in path
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Disabling track script check_haproxy since not found
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: VRRP_Instance(VI_1) removing protocol VIPs.
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Using LinkWatch kernel netlink reflector...
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: VRRP_Instance(VI_1) Entering BACKUP STATE
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: VRRP sockpool: [ifindex(2), proto(112), unicast(0), fd(10,11)]
[root@master02 keepalived]# ip address show ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:dd:8b:2b brd ff:ff:ff:ff:ff:ff
    inet 172.16.216.229/24 brd 172.16.216.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fedd:8b2b/64 scope link 
       valid_lft forever preferred_lft forever
//Master03
[root@master03 keepalived]# systemctl enable keepalived.service
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@master03 keepalived]# systemctl start keepalived.service
[root@master03 keepalived]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 21:54:22 CST; 3s ago
  Process: 42102 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 42103 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─42103 /usr/sbin/keepalived -D
           ├─42104 /usr/sbin/keepalived -D
           └─42105 /usr/sbin/keepalived -D

Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: WARNING - default user 'keepalived_script' for script execution does not exist - please create.
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: Truncating auth_pass to 8 characters
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: Cannot find script killall in path
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: Disabling track script check_haproxy since not found
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: VRRP_Instance(VI_1) removing protocol VIPs.
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: Using LinkWatch kernel netlink reflector...
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: VRRP_Instance(VI_1) Entering BACKUP STATE
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: VRRP sockpool: [ifindex(2), proto(112), unicast(0), fd(10,11)]
Jun 30 21:54:22 master03 Keepalived_healthcheckers[42104]: Initializing ipvs
Jun 30 21:54:22 master03 Keepalived_healthcheckers[42104]: Opening file '/etc/keepalived/keepalived.conf'.
[root@master03 keepalived]# ip address show ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:6d:08:5b brd ff:ff:ff:ff:ff:ff
    inet 172.16.216.230/24 brd 172.16.216.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe6d:85b/64 scope link 
       valid_lft forever preferred_lft forever
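
Two warnings in the keepalived logs above deserve attention. "Truncating auth_pass to 8 characters" is harmless here because all three nodes use the same password, but "Cannot find script killall in path" means the check_haproxy track script is disabled, so a failed HAProxy would not lower the node's priority and would never trigger a failover. On CentOS 7 the killall binary is provided by the psmisc package, so a likely fix is:

# Install killall (psmisc) on every master so the vrrp_script health check can run,
# then restart keepalived to re-enable the track script
yum install -y psmisc
systemctl restart keepalived.service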

Installing and configuring HAProxy

[root@master01 keepalived]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_nonlocal_bind = 1
> EOF
[root@master01 keepalived]# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1
[root@master02 keepalived]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_nonlocal_bind = 1
> EOF
[root@master02 keepalived]# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1
[root@master03 keepalived]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_nonlocal_bind = 1
> EOF
[root@master03 keepalived]# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1
[root@master01 ~]# yum install -y haproxy
[root@master02 ~]# yum install -y haproxy
[root@master03 ~]# yum install -y haproxy
# Edit the configuration file
[root@master01 ~]# cd /etc/haproxy/
[root@master01 haproxy]# vim haproxy.cfg
#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     40000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxies to the backends
# --------------------------------------------------------------------
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:16443
    option               tcplog
    default_backend      kubernetes-apiserver
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server  master01  172.16.216.228:6443 check
    server  master02  172.16.216.229:6443 check
    server  master03  172.16.216.230:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind               *:1080
    stats auth         admin:awesomePassword
    stats refresh      5s
    stats realm        HAProxy\ Statistics
    stats uri          /admin?stats
[root@master01 haproxy]# systemctl enable haproxy.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@master01 haproxy]# systemctl start haproxy.service 
[root@master01 haproxy]# systemctl status haproxy.service 
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 22:27:15 CST; 6s ago
 Main PID: 80058 (haproxy-systemd)
   CGroup: /system.slice/haproxy.service
           ├─80058 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.p...
           ├─80059 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
           └─80060 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

Jun 30 22:27:15 master01 systemd[1]: Started HAProxy Load Balancer.
Jun 30 22:27:15 master01 systemd[1]: Starting HAProxy Load Balancer...
Jun 30 22:27:15 master01 haproxy-systemd-wrapper[80058]: haproxy-systemd-wrapper: executing /usr...Ds
Jun 30 22:27:15 master01 haproxy-systemd-wrapper[80058]: [WARNING] 180/222715 (80059) : config :...e.
Jun 30 22:27:15 master01 haproxy-systemd-wrapper[80058]: [WARNING] 180/222715 (80059) : config :...e.
Hint: Some lines were ellipsized, use -l to show in full.
[root@master01 haproxy]# ss -lnt |grep -E "16443|1080"
LISTEN     0      128          *:16443                    *:*                  
LISTEN     0      128          *:1080                     *:*     
[root@master01 haproxy]# scp haproxy.cfg root@172.16.216.229:/etc/haproxy/
haproxy.cfg                                                        100% 4320     2.2MB/s   00:00    
[root@master01 haproxy]# scp haproxy.cfg root@172.16.216.230:/etc/haproxy/
haproxy.cfg                                                        100% 4320     3.3MB/s   00:00    
[root@master02 keepalived]# systemctl enable haproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@master02 keepalived]# systemctl start haproxy
[root@master02 keepalived]# systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 22:28:39 CST; 9s ago
 Main PID: 80834 (haproxy-systemd)
   CGroup: /system.slice/haproxy.service
           ├─80834 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.p...
           ├─80839 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
           └─80843 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

Jun 30 22:28:39 master02 systemd[1]: Started HAProxy Load Balancer.
Jun 30 22:28:39 master02 systemd[1]: Starting HAProxy Load Balancer...
Jun 30 22:28:39 master02 haproxy-systemd-wrapper[80834]: haproxy-systemd-wrapper: executing /usr...Ds
Jun 30 22:28:39 master02 haproxy-systemd-wrapper[80834]: [WARNING] 180/222839 (80839) : config :...e.
Jun 30 22:28:39 master02 haproxy-systemd-wrapper[80834]: [WARNING] 180/222839 (80839) : config :...e.
Hint: Some lines were ellipsized, use -l to show in full.
[root@master02 keepalived]# ss -lnt |grep -E "16443|1080"
LISTEN     0      128          *:16443                    *:*                  
LISTEN     0      128          *:1080                     *:*  
[root@master03 keepalived]# systemctl enable haproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@master03 keepalived]# systemctl start haproxy
[root@master03 keepalived]# systemctl status haproxy.service 
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 22:30:08 CST; 16s ago
 Main PID: 82314 (haproxy-systemd)
   CGroup: /system.slice/haproxy.service
           ├─82314 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.p...
           ├─82315 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
           └─82316 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

Jun 30 22:30:08 master03 systemd[1]: Started HAProxy Load Balancer.
Jun 30 22:30:08 master03 systemd[1]: Starting HAProxy Load Balancer...
Jun 30 22:30:08 master03 haproxy-systemd-wrapper[82314]: haproxy-systemd-wrapper: executing /usr...Ds
Jun 30 22:30:08 master03 haproxy-systemd-wrapper[82314]: [WARNING] 180/223008 (82315) : config :...e.
Jun 30 22:30:08 master03 haproxy-systemd-wrapper[82314]: [WARNING] 180/223008 (82315) : config :...e.
Hint: Some lines were ellipsized, use -l to show in full.
[root@master03 keepalived]# ss -lnt |grep -E "16443|1080"
LISTEN     0      128          *:16443                    *:*                  
LISTEN     0      128          *:1080                     *:*
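
The kube-apiserver backends do not exist yet, but the stats listener can already be used to confirm that HAProxy answers on the VIP. The credentials and URI below come from the configuration above; expect the kubernetes-apiserver backends to show as DOWN until the API servers are running.

# Sanity check against the HAProxy stats page on the VIP
curl -u admin:awesomePassword 'http://172.16.216.30:1080/admin?stats'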

Installing and configuring Kubernetes

#------------------ System settings --------------------------------
[root@master01 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master01 ~]# sysctl --system
[root@master02 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master02 ~]# sysctl --system
[root@master03 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master03 ~]# sysctl --system
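
If sysctl --system complains that the net.bridge.* keys do not exist, the br_netfilter module is probably not loaded; a minimal fix on a stock CentOS 7 kernel is:

# Load the bridge netfilter module, make it persistent, and re-apply the sysctl settings
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl --system
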
#--------------- Install Docker -----------------------------------
[root@master01 ~]# yum install -y docker
[root@master01 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@master01 ~]# systemctl start docker
[root@master02 ~]# yum install -y docker
[root@master02 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@master02 ~]# systemctl start docker
[root@master03 ~]# yum install -y docker
[root@master03 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@master03 ~]# systemctl start docker
#------------------------- Configure the yum repository ----------------------------------------
[root@master01 ~]# cat <<EOF >/etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[root@master01 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@master01 ~]# systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

[root@master02 ~]# cat <<EOF >/etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[root@master02 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@master02 ~]# systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

[root@master03 ~]# cat <<EOF >/etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[root@master03 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@master03 ~]# systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
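
The repository always carries the newest release, so a plain yum install may pull packages newer than the v1.15.0 targeted by the kubeadm configuration below. To match the versions exactly, the packages can be pinned (a sketch; it assumes the 1.15.0 builds are still available in the mirror):

# Pin kubelet/kubeadm/kubectl to the version used in kubeadm-config.yaml
yum install -y kubelet-1.15.0 kubeadm-1.15.0 kubectl-1.15.0 --disableexcludes=kubernetes
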
#------------------------- Create the kubeadm configuration file ----------------------------------
[root@master01 ~]# cat > kubeadm-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.15.0
apiServer:
  certSANs:
    - "cluster.kube.com"
controlPlaneEndpoint: "cluster.kube.com:16443"
networking:
   podSubnet: "10.244.0.0/16"
EOF
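
controlPlaneEndpoint refers to cluster.kube.com, a name that never appears in the /etc/hosts file built earlier, so every node must be able to resolve it to the VIP. A minimal approach, assuming no DNS record exists for it, is an extra hosts entry on all masters and worker nodes:

# Map the control-plane endpoint name to the keepalived VIP (add on every node)
echo "172.16.216.30 cluster.kube.com" >> /etc/hosts
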
#------------------------------- Disable swap -----------------------------------------
[root@master01 ~]# swapoff -a
[root@master01 ~]# vim /etc/fstab
#
# /etc/fstab
# Created by anaconda on Sun Aug  5 13:54:05 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=5c427701-1abf-4d52-821d-b7bdc68ed358 /boot                   xfs     defaults        0 0
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0

[root@master02 ~]# swapoff -a
[root@master02 ~]# vim /etc/fstab
#
# /etc/fstab
# Created by anaconda on Sun Aug  5 13:54:05 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=5c427701-1abf-4d52-821d-b7bdc68ed358 /boot                   xfs     defaults        0 0
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0

[root@master03 ~]# swapoff -a
[root@master03 ~]# vim /etc/fstab
#
# /etc/fstab
# Created by anaconda on Sun Aug  5 13:54:05 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=5c427701-1abf-4d52-821d-b7bdc68ed358 /boot                   xfs     defaults        0 0
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0
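
The interactive fstab edits above (commenting out the swap line) can also be done non-interactively; the sed expression below comments out any uncommented line that mentions swap:

# Disable swap now and keep it disabled across reboots
swapoff -a
sed -ri '/swap/ s/^([^#])/#\1/' /etc/fstab
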
#------------------------------- Initialize the first master -----------------------------------------
[root@master01 ~]# kubeadm init --config kubeadm-config.yaml
[init] Using Kubernetes version: v1.15.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master01 localhost] and IPs [172.16.216.228 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master01 localhost] and IPs [172.16.216.228 127.0.0.1 ::1]
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.216.228 172.16.216.30 172.16.216.30]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.503336 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: z37llz.huyi3c5j1l3tt1uz
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities 
and service account keys on each node and then running the following as root:

  kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz \
    --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481 \
    --experimental-control-plane          

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz \
    --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481 
[root@master01 ~]# mkdir -p $HOME/.kube
[root@master01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
#--------------------------- Deploy the pod network (Calico) ---------------------------------------------
[root@master01 ~]# kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
[root@master01 ~]# kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
configmap/calico-config created
service/calico-typha created
deployment.apps/calico-typha created
poddisruptionbudget.policy/calico-typha created
daemonset.extensions/calico-node created
serviceaccount/calico-node created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
[root@master01 ~]# kubectl get pods -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE
calico-node-qblqp                  2/2     Running   2          3m30s
coredns-5c98db65d4-d2lgs           0/1     Running   1          32m
coredns-5c98db65d4-hc22b           1/1     Running   1          32m
etcd-master01                      1/1     Running   1          31m
kube-apiserver-master01            1/1     Running   1          31m
kube-controller-manager-master01   1/1     Running   2          31m
kube-proxy-zvdsk                   1/1     Running   1          32m
kube-scheduler-master01            1/1     Running   2          31m
#---------------------------------- Copy certificates to the other masters -----------------------------------
[root@master01 ~]# ssh root@master02 mkdir -p /etc/kubernetes/pki/etcd
[root@master01 ~]# scp /etc/kubernetes/admin.conf root@master02:/etc/kubernetes/
admin.conf                                                                          100% 5449     2.5MB/s   00:00 
[root@master01 ~]# scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@master02:/etc/kubernetes/pki/
ca.crt                                                                              100% 1025   197.7KB/s   00:00    
ca.key                                                                              100% 1675   522.0KB/s   00:00    
sa.key                                                                              100% 1679   413.0KB/s   00:00    
sa.pub                                                                              100%  451   187.2KB/s   00:00    
front-proxy-ca.crt                                                                  100% 1038   579.2KB/s   00:00    
front-proxy-ca.key                                                                  100% 1675   872.1KB/s   00:00    
[root@master01 ~]# scp /etc/kubernetes/pki/etcd/ca.* root@master02:/etc/kubernetes/pki/etcd/
ca.crt                                                                              100% 1017   663.4KB/s   00:00    
ca.key                                                                              100% 1679     1.1MB/s   00:00    
[root@master01 ~]# ssh root@master03 mkdir -p /etc/kubernetes/pki/etcd
[root@master01 ~]# scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@master03:/etc/kubernetes/pki/
ca.crt                                                                              100% 1025   582.8KB/s   00:00    
ca.key                                                                              100% 1675   960.1KB/s   00:00    
sa.key                                                                              100% 1679     1.2MB/s   00:00    
sa.pub                                                                              100%  451   327.9KB/s   00:00    
front-proxy-ca.crt                                                                  100% 1038   781.4KB/s   00:00    
front-proxy-ca.key                                                                  100% 1675     1.3MB/s   00:00    
[root@master01 ~]# scp /etc/kubernetes/pki/etcd/ca.* root@master03:/etc/kubernetes/pki/etcd/
ca.crt                                                                              100% 1017   579.2KB/s   00:00    
ca.key                                                                              100% 1679   739.3KB/s   00:00    
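
The same certificate distribution can be expressed as one loop over the joining masters (admin.conf is optional for master03, which writes its own copy during join, but copying it does no harm):

# Distribute the shared CA material from master01 to the other control-plane nodes
for h in master02 master03; do
    ssh root@$h mkdir -p /etc/kubernetes/pki/etcd
    scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@$h:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.* root@$h:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/admin.conf root@$h:/etc/kubernetes/
done
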
#------------------- Join the remaining masters to the cluster ---------------------------------
[root@master02 ~]# kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481 --experimental-control-plane
Flag --experimental-control-plane has been deprecated, use --control-plane instead
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master02 localhost] and IPs [172.16.216.229 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master02 localhost] and IPs [172.16.216.229 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master02 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.216.229 172.16.216.30 172.16.216.30]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[mark-control-plane] Marking the node master02 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master02 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.
[root@master02 ~]# mkdir -p $HOME/.kube
[root@master02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@master03 ~]# kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481 --experimental-control-plane
Flag --experimental-control-plane has been deprecated, use --control-plane instead
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master03 localhost] and IPs [172.16.216.230 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master03 localhost] and IPs [172.16.216.230 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master03 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.216.230 172.16.216.30 172.16.216.30]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[mark-control-plane] Marking the node master03 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master03 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.
[root@master03 ~]# mkdir -p $HOME/.kube
[root@master03 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master03 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@master01 ~]# kubectl get pods -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE
calico-node-7k75b                  2/2     Running   0          2m42s
calico-node-c2mj9                  2/2     Running   0          115s
calico-node-qblqp                  2/2     Running   2          20m
coredns-5c98db65d4-d2lgs           1/1     Running   1          49m
coredns-5c98db65d4-hc22b           1/1     Running   1          49m
etcd-master01                      1/1     Running   1          48m
etcd-master02                      1/1     Running   0          2m40s
etcd-master03                      1/1     Running   0          115s
kube-apiserver-master01            1/1     Running   1          48m
kube-apiserver-master02            1/1     Running   0          102s
kube-apiserver-master03            1/1     Running   1          111s
kube-controller-manager-master01   1/1     Running   3          48m
kube-controller-manager-master02   1/1     Running   0          99s
kube-controller-manager-master03   1/1     Running   0          48s
kube-proxy-k8zxw                   1/1     Running   0          115s
kube-proxy-m68b4                   1/1     Running   0          2m42s
kube-proxy-zvdsk                   1/1     Running   1          49m
kube-scheduler-master01            1/1     Running   3          48m
kube-scheduler-master02            1/1     Running   0          105s
kube-scheduler-master03            1/1     Running   0          49s

[root@master01 ~]# kubectl get nodes
NAME            STATUS   ROLES    AGE    VERSION
cluster-node1   Ready    <none>   2d4h   v1.15.0
cluster-node2   Ready    <none>   2d4h   v1.15.0
master01        Ready    master   2d6h   v1.15.0
master02        Ready    master   2d5h   v1.15.0
master03        Ready    master   2d5h   v1.15.0
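
With all three control-plane nodes healthy, the failover path can be exercised by stopping HAProxy on whichever node currently holds the VIP. Because check_haproxy runs every 3 seconds and needs 10 consecutive failures before the weight -2 penalty applies, the VIP takes roughly half a minute to move. A sketch of the test, assuming the VIP is on master01 and the killall check is working (see the psmisc note earlier):

# On master01: stop HAProxy and watch the VIP leave this node
systemctl stop haproxy.service
ip address show ens33 | grep 172.16.216.30    # should eventually print nothing here

# On master02: the VIP should appear and the API should stay reachable
ip address show ens33 | grep 172.16.216.30
kubectl get nodes

# Restore master01 afterwards
systemctl start haproxy.service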

2. Worker Node Installation and Configuration

[root@localhost ~]# hostnamectl set-hostname cluster-node1
[root@localhost ~]# logout
[root@localhost ~]# hostnamectl set-hostname cluster-node2
[root@localhost ~]# logout
[root@master01 ~]# scp /etc/hosts root@cluster-node1:/etc/
The authenticity of host 'cluster-node1 (172.16.216.234)' can't be established.
ECDSA key fingerprint is SHA256:RSjZGjpxNF+3FfNVScnO7si+ixmb5cvjEQChMZANJl8.
ECDSA key fingerprint is MD5:91:c5:3d:0a:22:4a:51:9b:b6:57:04:c8:f4:10:df:fd.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'cluster-node1,172.16.216.234' (ECDSA) to the list of known hosts.
root@cluster-node1's password: 
hosts                                                                                 100%  389   140.9KB/s   00:00    
[root@master01 ~]# scp /etc/hosts root@cluster-node2:/etc/
The authenticity of host 'cluster-node2 (172.16.216.235)' can't be established.
ECDSA key fingerprint is SHA256:RSjZGjpxNF+3FfNVScnO7si+ixmb5cvjEQChMZANJl8.
ECDSA key fingerprint is MD5:91:c5:3d:0a:22:4a:51:9b:b6:57:04:c8:f4:10:df:fd.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'cluster-node2,172.16.216.235' (ECDSA) to the list of known hosts.
root@cluster-node2's password: 
hosts                                                                                 100%  389    17.7KB/s   00:00  
[root@cluster-node1 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@cluster-node1 ~]# sysctl --system
# Docker is installed here the same way as on the other nodes (yum install -y docker)
[root@cluster-node1 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@cluster-node1 ~]# systemctl start docker

[root@cluster-node2 ~]# yum install -y docker
[root@cluster-node2 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@cluster-node2 ~]# sysctl --system
[root@cluster-node2 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@cluster-node2 ~]# systemctl start docker
#--------------------------------------------------------------------------
[root@cluster-node1 ~]# cat <<EOF >/etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[root@cluster-node2 ~]# cat <<EOF >/etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[root@cluster-node1 ~]# swapoff -a
[root@cluster-node1 ~]# vim /etc/fstab
#
# /etc/fstab
# Created by anaconda on Sun Aug  5 13:54:05 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=5c427701-1abf-4d52-821d-b7bdc68ed358 /boot                   xfs     defaults        0 0
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0
[root@cluster-node1 ~]# docker pull mirrorgooglecontainers/kube-proxy:v1.15.0
Trying to pull repository docker.io/mirrorgooglecontainers/kube-proxy ... 
v1.15.0: Pulling from docker.io/mirrorgooglecontainers/kube-proxy
6cf6a0b0da0d: Already exists 
8e1ce322a1d9: Pull complete 
b593bfa65f6f: Pull complete 
Digest: sha256:63b8aaf1697550f318e9b46e5a7fc019f1d86912f1f3c9d9070bd00aaa361d0b
[root@cluster-node1 ~]# docker pull mirrorgooglecontainers/pause:3.1
Trying to pull repository docker.io/mirrorgooglecontainers/pause ... 
3.1: Pulling from docker.io/mirrorgooglecontainers/pause
67ddbfb20a22: Pull complete 
Digest: sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
Status: Downloaded newer image for docker.io/mirrorgooglecontainers/pause:3.1
[root@cluster-node1 ~]# docker pull coredns/coredns:1.3.1
Trying to pull repository docker.io/coredns/coredns ... 
1.3.1: Pulling from docker.io/coredns/coredns
Digest: sha256:02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4
Status: Image is up to date for docker.io/coredns/coredns:1.3.1
[root@cluster-node1 ~]# docker tag docker.io/mirrorgooglecontainers/kube-proxy:v1.15.0 k8s.gcr.io/kube-proxy:v1.15.0
[root@cluster-node1 ~]# docker tag docker.io/mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1
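
The coredns image pulled above is never retagged in this transcript. If a CoreDNS pod is ever scheduled onto this node, the kubelet will look for the k8s.gcr.io name, so an extra tag in the same style as the two commands above may be needed:

# Give the mirrored CoreDNS image the name the kubelet expects
docker tag docker.io/coredns/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1
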
[root@cluster-node1 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@cluster-node1 ~]# systemctl enable kubelet.service
[root@cluster-node1 ~]# systemctl enable --now kubelet

[root@cluster-node1 ~]# echo 1 > /proc/sys/net/ipv4/ip_forward
[root@cluster-node1 ~]# kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@cluster-node2 ~]# swapoff -a
[root@cluster-node2 ~]# vim /etc/fstab
#
# /etc/fstab
# Created by anaconda on Sun Aug  5 13:54:05 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=5c427701-1abf-4d52-821d-b7bdc68ed358 /boot                   xfs     defaults        0 0
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0

[root@cluster-node2 ~]# docker pull mirrorgooglecontainers/kube-proxy:v1.15.0
Trying to pull repository docker.io/mirrorgooglecontainers/kube-proxy ... 
v1.15.0: Pulling from docker.io/mirrorgooglecontainers/kube-proxy
6cf6a0b0da0d: Already exists 
8e1ce322a1d9: Pull complete 
b593bfa65f6f: Pull complete 
Digest: sha256:63b8aaf1697550f318e9b46e5a7fc019f1d86912f1f3c9d9070bd00aaa361d0b
Status: Downloaded newer image for docker.io/mirrorgooglecontainers/kube-proxy:v1.15.0
[root@cluster-node2 ~]# docker pull mirrorgooglecontainers/pause:3.1
Trying to pull repository docker.io/mirrorgooglecontainers/pause ... 
3.1: Pulling from docker.io/mirrorgooglecontainers/pause
67ddbfb20a22: Pull complete 
Digest: sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
Status: Downloaded newer image for docker.io/mirrorgooglecontainers/pause:3.1
[root@cluster-node2 ~]# docker tag docker.io/mirrorgooglecontainers/kube-proxy:v1.15.0 k8s.gcr.io/kube-proxy:v1.15.0
[root@cluster-node2 ~]# docker tag docker.io/mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1
[root@cluster-node2 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@cluster-node2 ~]# systemctl enable kubelet.service
[root@cluster-node2 ~]# systemctl enable --now kubelet

[root@cluster-node2 sysctl.d]# echo 1 > /proc/sys/net/ipv4/ip_forward
[root@cluster-node2 sysctl.d]# kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
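
The earlier kubectl get nodes output lists the two workers with ROLES <none>. If you prefer them to show up as workers, a role label can be added from any master; this is purely cosmetic:

# node-role labels are what kubectl's ROLES column displays
kubectl label node cluster-node1 node-role.kubernetes.io/worker=
kubectl label node cluster-node2 node-role.kubernetes.io/worker=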


Original article: https://blog.51cto.com/stuart/2418373
