Building a Highly Available Kubernetes 1.17.0 Cluster

Building a highly available Kubernetes 1.17.0 cluster (IPVS networking)

Reference: https://jimmysong.io/kubernetes-handbook/practice/create-tls-and-secret-key.html

Installation environment

Master high availability
  • Official documentation

https://kubernetes.io/zh/docs/admin/high-availability/#%E5%A4%8D%E5%88%B6%E7%9A%84API%E6%9C%8D%E5%8A%A1

  • References: https://blog.51cto.com/ylw6006/2164981 and https://yq.aliyun.com/articles/679600

System configuration before installation

Run the following on every machine that will join the k8s cluster.

  • Set up passwordless SSH login between all cluster machines
# ssh-keygen
# Copy the contents of id_rsa.pub into ~/.ssh/authorized_keys on the other machines

  • Set up /etc/hosts
(Set a permanent hostname, then log in again.)
# Set the hostname
# hostnamectl set-hostname kube-master
# cat /etc/hosts
# cat /etc/hosts
10.2.33.5 kube-node1 nginx.btcexa.com test.btcexa.com k8s.grafana.btcexa.com
10.2.33.127 kube-node2 nginx.btcexa.com test.btcexa.com
10.2.33.65 kube-node3 nginx.btcexa.com test.btcexa.com
10.2.33.5 nginx.btcexa.com test.btcexa.com test-ning.btcexa.com k8s.grafana.btcexa.com k8s.prometheus.btcexa.com traefik-admin.btcexa.com traefik-nginx.btcexa.com

Kernel configuration


  • Upgrade CentOS packages and the kernel
yum -y update
yum -y install yum-plugin-fastestmirror
yum install -y epel-release
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
yum -y --enablerepo=elrepo-kernel install kernel-ml
  • Set the default boot kernel to the newly installed version
grub2-set-default 0
grub2-mkconfig -o /boot/grub2/grub.cfg
  • Disable the firewall and SELinux
systemctl stop firewalld && systemctl disable firewalld
sed -i "s/SELINUX=.*/SELINUX=disabled/g" /etc/selinux/config
setenforce 0
swapoff -a
sed -i '/^.*swap.*/d' /etc/fstab

  • Kernel parameter configuration
- Set the iptables-related kernel parameters
# modprobe overlay
# modprobe br_netfilter

# Setup required sysctl params, these persist across reboots.
cat > /etc/sysctl.d/99-kubernetes-cri.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

sysctl --system

Or, alternatively:
# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
# echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables

# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

# sysctl -p /etc/sysctl.d/k8s.conf



[ -f /proc/sys/fs/may_detach_mounts ] && sed -i "/fs.may_detach_mounts/ d" /etc/sysctl.conf
[ -f /proc/sys/fs/may_detach_mounts ] && echo "fs.may_detach_mounts=1" >> /etc/sysctl.conf
sysctl -p

# Very important
sysctl -w net.ipv6.conf.all.disable_ipv6=0
  • Configure limits.conf
cat >> /etc/security/limits.conf << EOF
* soft nproc 1024000
* hard nproc 1024000
* soft nofile 1024000
* hard nofile 1024000
* soft core 1024000
* hard core 1024000
######big mem ########
#* hard memlock unlimited
#* soft memlock unlimited
EOF
  • Configure 20-nproc.conf
sed -i 's/4096/1024000/' /etc/security/limits.d/20-nproc.conf
  • Set the journald log size limit and storage path
echo SystemMaxUse=600M >>/etc/systemd/journald.conf
mkdir -p /var/log/journal
chown root:systemd-journal /var/log/journal
chmod 2755 /var/log/journal
systemctl restart systemd-journald
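
To confirm the journald changes took effect, a quick check can be run afterwards (a minimal verification sketch; the 600M cap and /var/log/journal path come from the settings above):
# Verify persistent journal storage and the configured size cap
journalctl --disk-usage
grep SystemMaxUse /etc/systemd/journald.conf
ls -ld /var/log/journal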

Enable IPVS (for kube-proxy)


  • Install dependency command-line tools
# yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp socat fuse fuse-libs nfs-utils nfs-utils-lib pciutils ebtables ethtool
  • Load the IPVS modules (takes effect immediately)
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
  • Make the IPVS modules load persistently
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

# sysctl -p
  • Verify that the IPVS modules loaded successfully
# lsmod|grep ip_vs
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145497 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 133095 7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c 12644 4 xfs,ip_vs,nf_nat,nf_conntrack

# Confirm the br_netfilter module
# lsmod | grep br_netfilter

# Enable this kernel module so that bridged packets are processed by iptables for filtering and port forwarding, and so the Kubernetes nodes in the cluster can talk to each other
modprobe br_netfilter

# If kube-proxy is to use IPVS, the following modules must be present
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
Note:
If kube-proxy is to use IPVS, the modules above must be present on all Kubernetes nodes; a distribution sketch follows below.
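
A minimal sketch for pushing the same module list to every node and loading it there, assuming passwordless SSH is already set up and using the three node IPs used throughout this document:
#!/bin/bash
# Distribute and load the IPVS modules on all nodes (assumes root SSH access)
for node_ip in 10.2.33.5 10.2.33.127 10.2.33.65
do
    echo ">>> ${node_ip}"
    scp /etc/sysconfig/modules/ipvs.modules root@${node_ip}:/etc/sysconfig/modules/
    ssh root@${node_ip} "chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4"
done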

Kubernetes installation


  • Set up global environment variables
# mkdir -p /opt/k8s/{bin,ssl,cfg}
- Generate the apiserver token file
# date|sha1sum |awk '{print $1}'
b681138df1a8e0c2ddb8daff35490435caa5ff7a
# cd /opt/k8s/ssl
# cat > /opt/k8s/ssl/token.csv <<EOF
b681138df1a8e0c2ddb8daff35490435caa5ff7a,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

- Basic-auth file (optional, can be skipped)
cat > /opt/k8s/ssl/basic-auth.csv <<EOF
admin,admin,1
readonly,readonly,2
EOF
# vim /opt/k8s/env.sh

export BOOTSTRAP_TOKEN=b681138df1a8e0c2ddb8daff35490435caa5ff7a

# Prefer currently unused network ranges for the service and Pod CIDRs

# Service CIDR: unroutable before deployment; routable inside the cluster afterwards (provided by kube-proxy and ipvs)
export SERVICE_CIDR="10.254.0.0/16"

# Pod CIDR (a /16 range is recommended): unroutable before deployment; routable inside the cluster afterwards (provided by flanneld)
export CLUSTER_CIDR="10.10.0.0/16"

# Service port range (NodePort range)
export NODE_PORT_RANGE="30000-50000"

# IP addresses of all cluster machines
export NODE_IPS=(10.2.33.5 10.2.33.127 10.2.33.65)

# Hostnames corresponding to the IPs above
export NODE_NAMES=(kube-node1 kube-node2 kube-node3)

# kube-apiserver node IP
export MASTER_IP=0.0.0.0

# Internal https address of kube-apiserver
export KUBE_APISERVER="https://kubernetes.exa.local:6443"


# External https address of kube-apiserver
export KUBE_PUBLIC_APISERVER="https://kubernetes.btcexa.com:6443"

# etcd cluster endpoint list
export ETCD_ENDPOINTS="https://10.2.33.5:2379,https://10.2.33.127:2379,https://10.2.33.65:2379"

# flanneld network config prefix in etcd
export FLANNEL_ETCD_PREFIX="/kubernetes/network"

# kubernetes service IP (normally the first IP of SERVICE_CIDR)
export CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"

# Cluster DNS service IP (pre-allocated from SERVICE_CIDR)
export CLUSTER_DNS_SVC_IP="10.254.0.2"

# Cluster DNS domain
export CLUSTER_DNS_DOMAIN="cluster.local."

Install the cfssl toolchain, used to sign certificates.

  • Installing it on the master node only is sufficient
cd  /opt/k8s/
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/bin/cfssl
mv cfssljson_linux-amd64 /usr/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
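
A quick sanity check that the tools are on PATH and executable:
# Verify the cfssl toolchain
cfssl version
which cfssljson cfssl-certinfo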

Install Docker and configure a registry mirror

# Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce -y

# Configure the Docker registry mirror
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://bc437cce.m.daocloud.io

# Restart Docker
(
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
)
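
To confirm Docker is running and the mirror was picked up, a simple check (a sketch; the daocloud script normally writes the mirror into /etc/docker/daemon.json, which is an assumption about that script's behavior):
# Confirm Docker is active and the registry mirror is configured
systemctl is-active docker
docker info | grep -A1 'Registry Mirrors'
cat /etc/docker/daemon.json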

Generate etcd certificates

  • Change into the directory that holds the certificates.
cd /opt/k8s/ssl
  • Create the JSON config used to generate the CA certificate, with the content below. Set expiry long enough; replacing an expired certificate is painful.
cat > ca-config.json <<EOF

{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF
  • Create the CA certificate signing request (CSR) JSON file
cat > ca-csr.json <<EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shanghai",
      "ST": "Shanghai"
    }
  ]
}
EOF
  • Generate the CA certificate (ca.pem) and key (ca-key.pem)
# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
2019/12/26 09:33:53 [INFO] generating a new CA key and certificate from CSR
2019/12/26 09:33:53 [INFO] generate received request
2019/12/26 09:33:53 [INFO] received CSR
2019/12/26 09:33:53 [INFO] generating key: rsa-2048
2019/12/26 09:33:53 [INFO] encoded CSR
2019/12/26 09:33:53 [INFO] signed certificate with serial number 76090837348387020865481584188520719234232827929
- The generated files:
ls ./
ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem
  • Generate the etcd certificate
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "10.254.0.1",
    "kubernetes.exa.local",
    "kubernetes.btcexa.com",
    "harbor.btcexa.com",
    "10.2.33.5",
    "10.2.33.127",
    "10.2.33.65"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai"
    }
  ]
}
EOF
Important: because flanneld also uses the etcd certificate, the first address of the planned service CIDR must be added here; otherwise in-cluster access to https://10.254.0.1:443 fails with a certificate error.

  • Generate
cfssl gencert -ca=./ca.pem  -ca-key=./ca-key.pem  -config=./ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
2019/12/26 09:34:26 [INFO] generate received request
2019/12/26 09:34:26 [INFO] received CSR
2019/12/26 09:34:26 [INFO] generating key: rsa-2048
2019/12/26 09:34:26 [INFO] encoded CSR
2019/12/26 09:34:26 [INFO] signed certificate with serial number 680872829262173782320244647098818402787647586534
2019/12/26 09:34:26 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
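
Optionally inspect the generated certificate to confirm the SAN list contains all the hosts above (a verification sketch using cfssl-certinfo, installed earlier, or openssl):
# Inspect the etcd certificate's SANs and validity period
cfssl-certinfo -cert etcd.pem
openssl x509 -in etcd.pem -noout -text | grep -A1 'Subject Alternative Name'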

Install etcd

  • Download, extract, and copy to the installation directory.
Official releases: https://github.com/etcd-io/etcd/releases
# cd /opt/k8s && wget https://github.com/coreos/etcd/releases/download/v3.3.13/etcd-v3.3.13-linux-amd64.tar.gz
# tar xf etcd-v3.3.13-linux-amd64.tar.gz
  • Use the following script to generate the etcd configuration file and systemd unit.
# vim init-etcd.sh
#!/bin/bash
source /opt/env.sh

ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3
WORK_DIR=/opt/etcd

cat <<EOF >$WORK_DIR/cfg/etcd
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd
ExecStart=${WORK_DIR}/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=${WORK_DIR}/ssl/etcd.pem \
--key-file=${WORK_DIR}/ssl/etcd-key.pem \
--peer-cert-file=${WORK_DIR}/ssl/etcd.pem \
--peer-key-file=${WORK_DIR}/ssl/etcd-key.pem \
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
  • Deploy etcd
vim etcd_install.sh
#!/bin/bash
cp -avr /opt/k8s/env.sh /opt/env.sh
source /opt/env.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
#####etcd
# Create the etcd directories
ssh root@${node_ip} "mkdir -p /opt/etcd/{cfg,bin,ssl}"
# Copy the binaries
scp /opt/k8s/etcd-v3.3.13-linux-amd64/{etcd,etcdctl} root@${node_ip}:/opt/etcd/bin/
scp /opt/k8s/env.sh root@${node_ip}:/opt/
# Copy the config-generation script
scp /opt/k8s/init-etcd.sh root@${node_ip}:/opt/
# Copy the certificates
cd /opt/k8s/ssl/
scp etcd*pem ca*.pem root@${node_ip}:/opt/etcd/ssl/

#####
done
ssh root@10.2.33.5 "cd /opt/ && sh init-etcd.sh etcd01 10.2.33.5 etcd01=https://10.2.33.5:2380,etcd02=https://10.2.33.127:2380,etcd03=https://10.2.33.65:2380"

ssh root@10.2.33.127 "cd /opt && sh init-etcd.sh etcd02 10.2.33.127 etcd01=https://10.2.33.5:2380,etcd02=https://10.2.33.127:2380,etcd03=https://10.2.33.65:2380"

ssh root@10.2.33.65 "cd /opt/ && sh init-etcd.sh etcd03 10.2.33.65 etcd01=https://10.2.33.5:2380,etcd02=https://10.2.33.127:2380,etcd03=https://10.2.33.65:2380"

# sh etcd_install.sh
Start etcd on the master node first; its terminal blocks until etcd is started on the other two nodes, after which it is released. Run `systemctl start etcd` on all three nodes:
systemctl daemon-reload && systemctl enable etcd && systemctl start etcd

# Test once all three etcd members are running. Correct output looks like:
/opt/etcd/bin/etcdctl --endpoints=https://10.2.33.5:2379 --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/etcd.pem --key-file=/opt/etcd/ssl/etcd-key.pem cluster-health
member 255b6ed818720e20 is healthy: got healthy result from https://10.2.33.65:2379
member cbc6185ed5ac53ae is healthy: got healthy result from https://10.2.33.127:2379
member ccdbf5bbe09e862d is healthy: got healthy result from https://10.2.33.5:2379
cluster is healthy
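
etcd v3.3 also ships the v3 API client; the equivalent health check with the v3 flags looks roughly like the sketch below (note the flag names differ from the v2 ones above):
# Same health check via the etcd v3 API
ETCDCTL_API=3 /opt/etcd/bin/etcdctl \
  --endpoints=https://10.2.33.5:2379,https://10.2.33.127:2379,https://10.2.33.65:2379 \
  --cacert=/opt/etcd/ssl/ca.pem \
  --cert=/opt/etcd/ssl/etcd.pem \
  --key=/opt/etcd/ssl/etcd-key.pem \
  endpoint health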


Install Kubernetes

Master node installation

Generate master certificates

  • Generate the apiserver certificate
# /opt/k8s/ssl — the same CA used when generating the etcd certificates
cd /opt/k8s/ssl
cat > /opt/k8s/ssl/kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "10.2.33.5",
    "10.2.33.127",
    "10.2.33.65",
    "10.254.0.1",
    "kubernetes.exa.local",
    "kubernetes.btcexa.com",
    "harbor.btcexa.com",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
Note: every master-related IP must be listed above, along with the first address of the service CIDR, 10.254.0.1.

# cfssl gencert -ca=./ca.pem -ca-key=./ca-key.pem -config=./ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
2019/12/26 09:40:28 [INFO] generate received request
2019/12/26 09:40:28 [INFO] received CSR
2019/12/26 09:40:28 [INFO] generating key: rsa-2048
2019/12/26 09:40:29 [INFO] encoded CSR
2019/12/26 09:40:29 [INFO] signed certificate with serial number 79307740170237095958081306786566929940321574452
2019/12/26 09:40:29 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

- List the files

ls ./
ca-config.json ca-csr.json ca.pem etcd.pem kubernetes.csr kubernetes-key.pem
ca.csr ca-key.pem etcd-key.pem init-etcd.sh kubernetes-csr.json kubernetes.pem


  • Generate the kubectl (admin) certificate
cd /opt/k8s/ssl
# cat > /opt/k8s/ssl/admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

- Generate the kubectl admin client certificate

# cfssl gencert -ca=./ca.pem -ca-key=./ca-key.pem -config=./ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
2019/12/26 09:40:53 [INFO] generate received request
2019/12/26 09:40:53 [INFO] received CSR
2019/12/26 09:40:53 [INFO] generating key: rsa-2048
2019/12/26 09:40:53 [INFO] encoded CSR
2019/12/26 09:40:53 [INFO] signed certificate with serial number 232498819813658091378247501835328406476549876286
2019/12/26 09:40:53 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

Download, extract, and copy the Kubernetes binaries

Download page: https://kubernetes.io/docs/setup/release/notes/

cd /opt/k8s && wget https://storage.googleapis.com/kubernetes-release/release/v1.17.0/kubernetes-server-linux-amd64.tar.gz
tar -xf kubernetes-server-linux-amd64.tar.gz
mkdir -p /opt/kubernetes/{cfg,bin,ssl}
- Copy the binaries into the installation directory
cd /opt/k8s/kubernetes/server/bin/
\cp -avr kube-apiserver kube-controller-manager kube-scheduler kubectl /opt/kubernetes/bin/
- Copy the binaries to the second (HA) master node
scp kube-apiserver kube-controller-manager kube-scheduler kubectl root@10.2.33.127:/opt/kubernetes/bin/


- Copy the certificate files into the kubernetes ssl directory
cd /opt/k8s/ssl
\cp -avr kubernetes*pem ca*pem adm* token.csv /opt/kubernetes/ssl/
scp kubernetes*pem ca*pem adm* token.csv root@10.2.33.127:/opt/kubernetes/ssl/

Install kube-apiserver

  • Install the apiserver with the following script
cd /opt/k8s
vim install-apiserver.sh
#!/bin/bash
source /opt/k8s/env.sh
#MASTER_ADDRESS=${1:-"10.2.33.5"}
#ETCD_SERVERS=${2:-"http://127.0.0.1:2379"}

cat <<EOF >/opt/kubernetes/cfg/kube-apiserver

KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_ENDPOINTS} \\
--insecure-bind-address=127.0.0.1 \\
--bind-address=${MASTER_IP} \\
--insecure-port=8080 \\
--secure-port=6443 \\
--advertise-address=${MASTER_IP} \\
--allow-privileged=true \\
--service-cluster-ip-range=${SERVICE_CIDR} \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/ssl/token.csv \\
--service-node-port-range=${NODE_PORT_RANGE} \\
--tls-cert-file=/opt/etcd/ssl/etcd.pem \\
--tls-private-key-file=/opt/etcd/ssl/etcd-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/etcd.pem \\
--etcd-keyfile=/opt/etcd/ssl/etcd-key.pem"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

- Run the script above
sh install-apiserver.sh

- Copy the config file and systemd unit to the other master node
# scp /opt/kubernetes/cfg/kube-apiserver root@10.2.33.127:/opt/kubernetes/cfg/
# scp /usr/lib/systemd/system/kube-apiserver.service root@10.2.33.127:/usr/lib/systemd/system/

Install kube-controller-manager

  • Install it using the script below
# vim  install-controller-manager.sh
#!/bin/bash
source /opt/k8s/env.sh
MASTER_ADDRESS=${1:-"127.0.0.1"}

cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager


KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=${SERVICE_CIDR} \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

- Run the installation

# sh install-controller-manager.sh 127.0.0.1

- Copy the config file and systemd unit to the other master node
# scp /opt/kubernetes/cfg/kube-controller-manager root@10.2.33.127:/opt/kubernetes/cfg/
# scp /usr/lib/systemd/system/kube-controller-manager.service root@10.2.33.127:/usr/lib/systemd/system/

Install kube-scheduler

  • Install the kube-scheduler service using the following script
# vim  install_kube-scheduler.sh
#!/bin/bash
#
MASTER_ADDRESS=${1:-"127.0.0.1"}

cat <<EOF >/opt/kubernetes/cfg/kube-scheduler

KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

- Install

# sh install_kube-scheduler.sh 127.0.0.1

- Copy the config file and systemd unit to the other master node
# scp /opt/kubernetes/cfg/kube-scheduler root@10.2.33.127:/opt/kubernetes/cfg/
# scp /usr/lib/systemd/system/kube-scheduler.service root@10.2.33.127:/usr/lib/systemd/system/
  • Start the master node services
(
systemctl daemon-reload
systemctl enable kube-apiserver && systemctl restart kube-apiserver && systemctl status kube-apiserver

systemctl enable kube-controller-manager && systemctl restart kube-controller-manager && systemctl status kube-controller-manager

systemctl enable kube-scheduler && systemctl restart kube-scheduler && systemctl status kube-scheduler
)
(
systemctl status kube-apiserver
systemctl status kube-controller-manager
systemctl status kube-scheduler
)
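
Once all three services are active, the apiserver itself can be probed directly (a minimal sanity check; port 8080 is the insecure port enabled in the config above, 6443 the secure port, and the unauthenticated HTTPS probe relies on the default public-info-viewer RBAC binding):
# Quick liveness checks against kube-apiserver
curl -s http://127.0.0.1:8080/healthz ; echo
curl -sk https://127.0.0.1:6443/healthz ; echo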

Install kubectl

  • 1) Use the following script to set up kubectl (generates the internal kubeconfig)
Internal path: AWS DNS (kubernetes.exa.local) ---> internal AWS LB (TCP mode) --> TCP target group ---> k8s master nodes (port 6443)
cat > /opt/k8s/kubectl_private_install.sh << EOF
# Load environment variables
source /opt/k8s/env.sh
# Set the apiserver address
#KUBE_APISERVER='https://kubernetes.exa.local:6443'

# Set cluster parameters
/opt/kubernetes/bin/kubectl config set-cluster kubernetes --certificate-authority=/opt/kubernetes/ssl/ca.pem --embed-certs=true --server=\${KUBE_APISERVER} --kubeconfig=admin_private.kubeconfig

# Set client credentials
/opt/kubernetes/bin/kubectl config set-credentials admin --client-certificate=/opt/kubernetes/ssl/admin.pem --embed-certs=true --client-key=/opt/kubernetes/ssl/admin-key.pem --kubeconfig=admin_private.kubeconfig

# Set context parameters
# /opt/kubernetes/bin/kubectl config set-context kubernetes --cluster=kubernetes --user=admin --namespace=kube-system --kubeconfig=admin_private.kubeconfig
/opt/kubernetes/bin/kubectl config set-context kubernetes --cluster=kubernetes --user=admin --namespace=default --kubeconfig=admin_private.kubeconfig

# Set the default context
/opt/kubernetes/bin/kubectl config use-context kubernetes --kubeconfig=admin_private.kubeconfig
EOF

Configure kubectl (this only needs to be done once, on a single master node):
# sh /opt/k8s/kubectl_private_install.sh
Cluster "kubernetes" set.
User "admin" set.
Context "kubernetes" created.
Switched to context "kubernetes".

# Copy admin_private.kubeconfig to /root/.kube/config
cp /opt/k8s/admin_private.kubeconfig /root/.kube/config

  • 2) Use the following script to set up kubectl (generates the external kubeconfig)
External path: AWS DNS ---> external AWS LB (TCP mode) --> TCP target group ---> k8s master nodes (port 6443)
cat > /opt/k8s/kubectl_public_install.sh << EOF
# Load environment variables
#source /opt/k8s/env.sh
# Set the apiserver address
KUBE_APISERVER='https://kubernetes.btcexa.com:6443'

# Set cluster parameters
/opt/kubernetes/bin/kubectl config set-cluster kubernetes --certificate-authority=/opt/kubernetes/ssl/ca.pem --embed-certs=true --server=\${KUBE_APISERVER} --kubeconfig=admin_public.kubeconfig

# Set client credentials
/opt/kubernetes/bin/kubectl config set-credentials admin --client-certificate=/opt/kubernetes/ssl/admin.pem --embed-certs=true --client-key=/opt/kubernetes/ssl/admin-key.pem --kubeconfig=admin_public.kubeconfig

# Set context parameters
# /opt/kubernetes/bin/kubectl config set-context kubernetes --cluster=kubernetes --user=admin --namespace=kube-system --kubeconfig=admin_public.kubeconfig

/opt/kubernetes/bin/kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=admin_public.kubeconfig

# Set the default context
/opt/kubernetes/bin/kubectl config use-context kubernetes --kubeconfig=admin_public.kubeconfig
EOF

Configure kubectl (this only needs to be done once, on a single master node):
# sh /opt/k8s/kubectl_public_install.sh
Cluster "kubernetes" set.
User "admin" set.
Context "kubernetes" created.
Switched to context "kubernetes".

# Copy admin_public.kubeconfig to /root/.kube/config
cp /opt/k8s/admin_public.kubeconfig /root/.kube/config
# If you need to manage the cluster over the public network (tested: operations over the public network are slow!)
scp /opt/k8s/admin_public.kubeconfig root@10.2.33.127:/root/.kube/config
  • Add environment variables on all master nodes
- Add the Kubernetes binaries to PATH (on all master nodes)
cat > /etc/profile.d/k8s.sh <<EOF
#!/bin/bash
export PATH=\$PATH:/opt/kubernetes/bin/
EOF
source /etc/profile.d/k8s.sh
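
Optionally, kubectl shell completion can be enabled at the same time (a sketch; requires the bash-completion package):
# Enable kubectl bash completion (optional)
yum install -y bash-completion
kubectl completion bash > /etc/bash_completion.d/kubectl
source /etc/bash_completion.d/kubectl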

  • Use kubectl to verify the multi-master setup; run this on every master node and check that the output is healthy.
# kubectl get cs    # (the "Unknown" status issue seen in 1.16.0 and 1.16.4 does not occur in 1.17.0)
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-1 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}

# kubectl cluster-info
Kubernetes master is running at https://kubernetes.exa.local:6443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.


Install and add node machines (machine initialization, Docker installation, etc.)

Install kubelet, kube-proxy, and the flannel plugin

  • Generate the kube-proxy certificate
cat > /opt/k8s/ssl/kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

# Generate the certificate
# cd /opt/k8s/ssl/
# cfssl gencert -ca=/opt/k8s/ssl/ca.pem -ca-key=/opt/k8s/ssl/ca-key.pem -config=/opt/k8s/ssl/ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
2019/12/26 09:59:43 [INFO] generate received request
2019/12/26 09:59:43 [INFO] received CSR
2019/12/26 09:59:43 [INFO] generating key: rsa-2048
2019/12/26 09:59:43 [INFO] encoded CSR
2019/12/26 09:59:43 [INFO] signed certificate with serial number 157028017693635972642773375308791716823103748513
2019/12/26 09:59:43 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

  • Generate the flannel certificate
cat > flanneld-csr.json <<EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "shanghai",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

- Generate the certificate
# cd /opt/k8s/ssl
# cfssl gencert -ca=/opt/k8s/ssl/ca.pem \
-ca-key=/opt/k8s/ssl/ca-key.pem \
-config=/opt/k8s/ssl/ca-config.json \
-profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
2019/12/26 10:00:09 [INFO] generate received request
2019/12/26 10:00:09 [INFO] received CSR
2019/12/26 10:00:09 [INFO] generating key: rsa-2048
2019/12/26 10:00:09 [INFO] encoded CSR
2019/12/26 10:00:09 [INFO] signed certificate with serial number 113796707096533245041379767771722538790347756007
2019/12/26 10:00:09 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
  • Create the cluster role binding
# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created

  • On the master node, generate the bootstrap.kubeconfig and kube-proxy.kubeconfig files.
cd /opt/k8s/

vim gen-kubeconfig.sh
# Load environment variables
source /opt/k8s/env.sh
#--------- Create the kubelet bootstrapping kubeconfig ------------
#BOOTSTRAP_TOKEN=c76835f029914e3693a9834295bb840910211916 # must match /opt/kubernetes/ssl/token.csv

# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig

# Set client credentials
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig

# Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#--------- Create the kube-proxy kubeconfig -------------

# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig

# Set client credentials
kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
--client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig

# Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

  • Generate the bootstrap.kubeconfig and kube-proxy.kubeconfig files
# cp /opt/k8s/ssl/kube-proxy*.pem /opt/kubernetes/ssl/
# cd /opt/k8s/ && sh gen-kubeconfig.sh

  • Download the flannel package
Release downloads: https://github.com/coreos/flannel/releases/
# cd /opt/k8s/ && wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
# tar xf flannel-v0.11.0-linux-amd64.tar.gz
  • Then run the following from /opt/etcd/ssl (flannel stores its own subnet information in etcd, so etcd must be reachable; this writes the predefined Pod network configuration):
source /opt/k8s/env.sh
/opt/etcd/bin/etcdctl \
--endpoints=${ETCD_ENDPOINTS} \
--ca-file=/opt/k8s/ssl/ca.pem \
--cert-file=/opt/k8s/ssl/flanneld.pem \
--key-file=/opt/k8s/ssl/flanneld-key.pem \
set ${FLANNEL_ETCD_PREFIX}/config '{"Network":"'${CLUSTER_CIDR}'", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'

** If ${FLANNEL_ETCD_PREFIX} is not specified when flanneld starts, the default key /coreos.com/network/config is used.


  • Install and configure the flanneld service
vim /opt/k8s/install_flanneld.sh
source /opt/k8s/env.sh
# Write the flanneld config file with the following content

cat > /opt/k8s/cfg/flanneld << EOF
FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} -etcd-prefix=${FLANNEL_ETCD_PREFIX} -etcd-cafile=/opt/kubernetes/ssl/ca.pem -etcd-certfile=/opt/kubernetes/ssl/flanneld.pem -etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"
EOF

# flanneld systemd unit
cat > /opt/k8s/cfg/flanneld.service <<EOF
[Unit]
Description= Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF


# Make Docker use the flannel network
cat > /opt/k8s/cfg/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS --exec-opt native.cgroupdriver=cgroupfs --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10
ExecReload=/bin/kill -s HUP \$MAINPID
ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s


[Install]
WantedBy=multi-user.target
EOF
  • Generate the flannel configuration and service files
sh install_flanneld.sh

  • Install and configure kubelet
# kubelet with resource reservation tuning
# vim install-kubelet.sh
#!/bin/bash
source /opt/env.sh

NODE_ADDRESS=$1
#DNS_SERVER_IP=${2:-"10.254.0.2"}

cat <<EOF >/opt/kubernetes/cfg/kubelet

KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_ADDRESS} \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--cert-dir=/opt/kubernetes/ssl \\
--cgroup-driver=cgroupfs \\
--cluster-dns=${CLUSTER_DNS_SVC_IP} \\
--cluster-domain=${CLUSTER_DNS_DOMAIN} \\
--pod-infra-container-image=harbor.btcexa.com/kubernetes-ops/pause-amd64:3.0 \\
--enforce-node-allocatable=pods,system-reserved,kube-reserved \\
--kube-reserved=cpu=512m,memory=1024Mi,ephemeral-storage=2Gi \\
--system-reserved=cpu=512m,memory=1024Mi,ephemeral-storage=2Gi \\
--eviction-hard=imagefs.available<10%,memory.available<200Mi,nodefs.available<10%,nodefs.inodesFree<10% \\
--system-reserved-cgroup=/system.slice/ \\
--kube-reserved-cgroup=/system.slice/kubelet.service"

EOF

cat <<EOF >/opt/kubernetes/cfg/kubelet.config

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
- ${CLUSTER_DNS_SVC_IP}
clusterDomain: ${CLUSTER_DNS_DOMAIN}
failSwapOn: false

EOF

cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/cpuset/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/hugetlb/system.slice/kubelet.service
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

#systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet
  • Install and configure kube-proxy
# vim install-kube-proxy.sh
#!/bin/bash
source /opt/env.sh
NODE_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-proxy

KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=${SERVICE_CIDR} \\
--proxy-mode=ipvs \\
--ipvs-min-sync-period=5s \\
--ipvs-sync-period=5s \\
--ipvs-scheduler=rr \\
--masquerade-all=true \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

# systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy

  • Copy the required files into each node's bin directory. These files come from the Kubernetes and flannel packages.
# vim install_node.sh
#!/bin/bash
source /opt/k8s/env.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
# Create the kubernetes directories
ssh root@${node_ip} "mkdir -p /opt/kubernetes/{cfg,bin,ssl}"

#------------------------------------------------------------
### Copy flanneld certificates and config
cd /opt/k8s/ssl/
scp -p flanneld*.pem root@${node_ip}:/opt/kubernetes/ssl/
scp -p /opt/k8s/cfg/flanneld root@${node_ip}:/opt/kubernetes/cfg/

# Copy the flanneld binaries
cd /opt/k8s
scp -p flanneld mk-docker-opts.sh root@${node_ip}:/opt/kubernetes/bin/

# Install the flanneld and docker systemd units (the restart is commented out; services are started later)
cd /opt/k8s/cfg/
scp -p flanneld.service docker.service root@${node_ip}:/usr/lib/systemd/system/
# ssh root@${node_ip} "systemctl daemon-reload && systemctl restart flanneld && systemctl restart docker"

#-----------------------------------------------------------
### Copy kubelet and kube-proxy files
cd /opt/k8s/
scp -p bootstrap.kubeconfig kube-proxy.kubeconfig root@${node_ip}:/opt/kubernetes/cfg/

cd /opt/k8s/ssl/
scp -p ca.pem kube-proxy*.pem root@${node_ip}:/opt/kubernetes/ssl/

# Copy the kubelet and kube-proxy binaries
cd /opt/k8s/kubernetes/server/bin/
scp -p kubelet kube-proxy root@${node_ip}:/opt/kubernetes/bin/

# Copy the install scripts
cd /opt/k8s
scp -p env.sh install-kubelet.sh install-kube-proxy.sh root@${node_ip}:/opt/

done
# node1
ssh root@10.2.33.5 "sh /opt/install-kubelet.sh 10.2.33.5"
ssh root@10.2.33.5 "sh /opt/install-kube-proxy.sh 10.2.33.5"

# node2
ssh root@10.2.33.127 "sh /opt/install-kubelet.sh 10.2.33.127"
ssh root@10.2.33.127 "sh /opt/install-kube-proxy.sh 10.2.33.127"

# node3
ssh root@10.2.33.65 "sh /opt/install-kubelet.sh 10.2.33.65"
ssh root@10.2.33.65 "sh /opt/install-kube-proxy.sh 10.2.33.65"
  • Start the node services
# sh install_node.sh

# vim start_node.sh
source /opt/k8s/env.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable flanneld && systemctl restart flanneld"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable docker && systemctl restart docker"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy"

done

# sh start_node.sh
  • On the master node, check for pending node join (CSR) requests
# kubectl  get csr
NAME AGE REQUESTOR CONDITION
node-csr-Cw58qBpbo91wRpOGa81fFP5KfnqiRGVMKzuSMcfbH4A 9s kubelet-bootstrap Pending
node-csr-NUUmAMLXGjyQUxv0tvn1zONDbMU1gkgJz_9t8CR28oI 7s kubelet-bootstrap Pending
node-csr-q0s0lu-XbtWNg02MonWgISrulUScob12S7il-HR5-YU 6s kubelet-bootstrap Pending
  • Approve the requests on the master node
#  kubectl get csr|grep 'Pending' | awk '{print $1}'| xargs kubectl certificate approve
certificatesigningrequest.certificates.k8s.io/node-csr-Cw58qBpbo91wRpOGa81fFP5KfnqiRGVMKzuSMcfbH4A approved
certificatesigningrequest.certificates.k8s.io/node-csr-NUUmAMLXGjyQUxv0tvn1zONDbMU1gkgJz_9t8CR28oI approved
certificatesigningrequest.certificates.k8s.io/node-csr-q0s0lu-XbtWNg02MonWgISrulUScob12S7il-HR5-YU approved
  • View the approved CSRs on the master node
# kubectl  get csr
NAME AGE REQUESTOR CONDITION
node-csr-Cw58qBpbo91wRpOGa81fFP5KfnqiRGVMKzuSMcfbH4A 23s kubelet-bootstrap Approved,Issued
node-csr-NUUmAMLXGjyQUxv0tvn1zONDbMU1gkgJz_9t8CR28oI 21s kubelet-bootstrap Approved,Issued
node-csr-q0s0lu-XbtWNg02MonWgISrulUScob12S7il-HR5-YU 20s kubelet-bootstrap Approved,Issued
  • Check node status (nodes 10.2.33.65, 10.2.33.5, 10.2.33.127)
# kubectl  get node
NAME STATUS ROLES AGE VERSION
10.2.33.127 NotReady <none> 14s v1.17.0
10.2.33.5 NotReady <none> 14s v1.17.0
10.2.33.65 NotReady <none> 14s v1.17.0

# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
10.2.33.127 NotReady <none> 21s v1.17.0 10.2.33.127 <none> CentOS Linux 7 (Core) 5.4.6-1.el7.elrepo.x86_64 docker://19.3.5
10.2.33.5 NotReady <none> 21s v1.17.0 10.2.33.5 <none> CentOS Linux 7 (Core) 5.4.6-1.el7.elrepo.x86_64 docker://19.3.5
10.2.33.65 NotReady <none> 21s v1.17.0 10.2.33.65 <none> CentOS Linux 7 (Core) 5.4.6-1.el7.elrepo.x86_64 docker://19.3.5

# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.254.0.1:443 rr
-> 10.2.33.5:6443 Masq 1 0 0
-> 10.2.33.127:6443 Masq 1 0 0
  • Check cluster component status
Check on all three nodes that kube-proxy is running:
systemctl status kube-proxy

systemctl status kube-apiserver
systemctl status kube-controller-manager
systemctl status kube-scheduler
systemctl status etcd
systemctl status flanneld
systemctl status docker
systemctl status kubelet

systemctl start kube-apiserver
systemctl start kube-controller-manager
systemctl start kube-scheduler
systemctl start kube-proxy
systemctl start etcd
systemctl start flanneld
systemctl restart docker
systemctl status kubelet

systemctl start kubelet
systemctl stop etcd
systemctl stop kube-apiserver
systemctl stop kube-controller-manager
systemctl stop kube-scheduler
systemctl stop flanneld
systemctl stop docker
systemctl stop kubelet
systemctl stop kube-proxy
  • View endpoint information:
# kubectl get endpoints
NAME ENDPOINTS AGE
kubernetes 10.2.33.127:6443,10.2.33.5:6443 22m
  • Verification script:
# vim check_flanneld.sh
#!/bin/bash
source /opt/k8s/env.sh
/opt/etcd/bin/etcdctl --endpoints=${ETCD_ENDPOINTS} \
--ca-file=/opt/etcd/ssl/ca.pem \
--cert-file=/opt/etcd/ssl/etcd.pem \
--key-file=/opt/etcd/ssl/etcd-key.pem \
get ${FLANNEL_ETCD_PREFIX}/config

/opt/etcd/bin/etcdctl --endpoints=${ETCD_ENDPOINTS} \
--ca-file=/opt/etcd/ssl/ca.pem \
--cert-file=/opt/etcd/ssl/etcd.pem \
--key-file=/opt/etcd/ssl/etcd-key.pem \
ls ${FLANNEL_ETCD_PREFIX}/subnets

# sh check_flanneld.sh
Output:
{"Network":"10.10.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}
/kubernetes/network/subnets/10.10.63.0-24
/kubernetes/network/subnets/10.10.69.0-24
/kubernetes/network/subnets/10.10.40.0-24
  • View subnet information
# source /opt/k8s/env.sh
# /opt/etcd/bin/etcdctl --endpoints=${ETCD_ENDPOINTS} \
--ca-file=/opt/etcd/ssl/ca.pem \
--cert-file=/opt/etcd/ssl/etcd.pem \
--key-file=/opt/etcd/ssl/etcd-key.pem \
get ${FLANNEL_ETCD_PREFIX}/subnets/10.10.63.0-24
Output:
{"PublicIP":"10.2.33.65","BackendType":"vxlan","BackendData":{"VtepMAC":"92:4f:b7:1d:24:ef"}}
Test commands:
# ssh 10.2.33.65 "ip addr show flannel.1| grep -w inet"
inet 10.10.63.0/32 scope global flannel.1

# ssh 10.2.33.65 "ping -c 1 10.10.63.0"
PING 10.10.63.0 (10.10.63.0) 56(84) bytes of data.
64 bytes from 10.10.63.0: icmp_seq=1 ttl=64 time=0.062 ms

# telnet 10.2.33.65 22
Trying 10.2.33.65...
Connected to 10.2.33.65.
Escape character is '^]'.
SSH-2.0-OpenSSH_7.4
  • Then check the IP configuration on every node; from each host you can test reachability of the flannel interface IPs on the other nodes. If they respond, flannel is configured correctly (see the sketch below).
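
A small loop that checks cross-node flannel connectivity from the current host, reusing NODE_IPS from env.sh (a verification sketch, not part of the original install scripts):
#!/bin/bash
# Ping the flannel.1 interface of every node from this host
source /opt/k8s/env.sh
for node_ip in ${NODE_IPS[@]}
do
    flannel_ip=$(ssh root@${node_ip} "ip addr show flannel.1 | grep -w inet | awk '{print \$2}' | cut -d/ -f1")
    echo ">>> ${node_ip} flannel.1 = ${flannel_ip}"
    ping -c 1 -W 2 ${flannel_ip}
done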


CoreDNS

There are two installation methods:
Method 1
Use the manifests bundled in the Kubernetes server package
# tar xf kubernetes-server-linux-amd64.tar.gz
# cd /opt/k8s/kubernetes
# tar xf kubernetes-src.tar.gz
# cd /opt/k8s/kubernetes/cluster/addons/dns/coredns
# cp coredns.yaml.base coredns.yaml
Modify the config file:
# diff coredns.yaml.base coredns.yaml
68c68
< kubernetes __PILLAR__DNS__DOMAIN__ in-addr.arpa ip6.arpa {
---
> kubernetes 10.254.0.0/16 cluster.local. in-addr.arpa ip6.arpa {
95a96
> replicas: 2
118a120
> #image: k8s.gcr.io/coredns:1.6.2
189c191
< clusterIP: __PILLAR__DNS__SERVER__
---
> clusterIP: 10.254.0.2 # cluster DNS address
Method 2
# cd /opt/k8s   (image: coredns/coredns:1.6.5)
# git clone https://github.com/coredns/deployment.git
Cloning into 'deployment'...

remote: Enumerating objects: 1, done.
remote: Counting objects: 100% (1/1), done.
remote: Total 402 (delta 0), reused 0 (delta 0), pack-reused 401
Receiving objects: 100% (402/402), 117.14 KiB | 122.00 KiB/s, done.
Resolving deltas: 100% (191/191), done.

# mv deployment/ coredns/
# cd /opt/k8s/coredns/kubernetes
# yum -y install jq conntrack-tools
# ./deploy.sh -s -r 10.254.0.0/16 -i 10.254.0.2 -d cluster.local > coredns.yaml

# diff coredns.yaml.sed coredns.yaml
61c61
< kubernetes CLUSTER_DOMAIN REVERSE_CIDRS {
---
> kubernetes cluster.local 10.254.0.0/16 {
63c63
< }FEDERATIONS
---
> }
65c65
< forward . UPSTREAMNAMESERVER
---
> forward . /etc/resolv.conf
70c70
< }STUBDOMAINS
---
> }
183c183
< clusterIP: CLUSTER_DNS_IP
---
> clusterIP: 10.254.0.2

# kubectl create -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created

# cd /opt/k8s/
# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.254.0.2 <none> 53/UDP,53/TCP,9153/TCP 11s

# kubectl get po -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-76b74f549-xhm78 1/1 Running 0 14m 10.10.69.2 10.2.33.5 <none> <none>

# kubectl get all -n kube-system
NAME READY STATUS RESTARTS AGE
pod/coredns-76b74f549-xhm78 1/1 Running 0 14m

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kube-dns ClusterIP 10.254.0.2 <none> 53/UDP,53/TCP,9153/TCP 14m

NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/coredns 1/1 1 1 14m

NAME DESIRED CURRENT READY AGE
replicaset.apps/coredns-76b74f549 1 1 1 14m
  • Check and verify

View the logs:

# kubectl logs -f  coredns-7b5fbb568b-xqjck -n kube-system
.:53
[INFO] plugin/reload: Running configuration MD5 = 1ee2e9685eedeba796e481c372ac7de4
CoreDNS-1.6.6
linux/amd64, go1.13.5, 6a7a75e
  • First create an nginx Deployment and Service
# cat > my-nginx.yaml<<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: my-nginx
  template:
    metadata:
      labels:
        app: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.9
        ports:
        - name: http
          containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: my-nginx-service
  namespace: default
spec:
  selector:
    app: my-nginx
  type: ClusterIP
  ports:
  - name: http
    port: 80
    targetPort: 80
EOF

# kubectl apply -f my-nginx.yaml
deployment.apps/my-nginx created
service/my-nginx-service created

# kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
my-nginx 2/2 2 2 90s
# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 42m
my-nginx-service ClusterIP 10.254.13.183 <none> 80/TCP 11s

From inside a pod, check that the service name resolves to 10.254.13.183:
# kubectl run -it --rm busybox --image=busybox /bin/sh
/ # ping my-nginx-service
PING my-nginx-service (10.254.13.183): 56 data bytes
64 bytes from 10.254.13.183: seq=0 ttl=64 time=0.185 ms
64 bytes from 10.254.13.183: seq=1 ttl=64 time=0.070 ms

/ # ping my-nginx-service.default.svc.cluster.local
PING my-nginx-service.default.svc.cluster.local (10.254.13.183): 56 data bytes
64 bytes from 10.254.13.183: seq=0 ttl=64 time=0.041 ms
64 bytes from 10.254.13.183: seq=1 ttl=64 time=0.074 ms

/ # ping kubernetes
PING kubernetes (10.254.0.1): 56 data bytes
64 bytes from 10.254.0.1: seq=0 ttl=64 time=0.045 ms
64 bytes from 10.254.0.1: seq=1 ttl=64 time=0.073 ms

/ # ping kube-dns.kube-system.svc.cluster.local
PING kube-dns.kube-system.svc.cluster.local (10.254.0.2): 56 data bytes
64 bytes from 10.254.0.2: seq=0 ttl=64 time=0.045 ms
64 bytes from 10.254.0.2: seq=1 ttl=64 time=0.066 ms

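Name resolution can also be checked explicitly from the same busybox shell (a sketch; busybox's nslookup applet is limited but sufficient here):
/ # nslookup my-nginx-service
/ # nslookup kubernetes.default.svc.cluster.local
/ # cat /etc/resolv.conf     # should point at the cluster DNS IP 10.254.0.2
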
  • View the IPVS virtual servers
[root@ip-10-2-33-5 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.254.0.1:443 rr
-> 10.2.33.5:6443 Masq 1 0 0
-> 10.2.33.127:6443 Masq 1 1 0
TCP 10.254.0.2:53 rr
-> 10.10.69.2:53 Masq 1 0 0
TCP 10.254.0.2:9153 rr
-> 10.10.69.2:9153 Masq 1 0 0
TCP 10.254.13.183:80 rr
-> 10.10.40.2:80 Masq 1 0 0
-> 10.10.63.2:80 Masq 1 0 0
UDP 10.254.0.2:53 rr
-> 10.10.69.2:53 Masq 1 0 8

# journalctl -u kube-proxy.service
-- Logs begin at Thu 2019-12-26 09:04:00 UTC, end at Thu 2019-12-26 10:30:39 UTC. --
Dec 26 10:06:24 ip-10-2-33-5.ec2.internal kube-proxy[2850]: I1226 10:06:24.978700 2850 flags.go:33] FLAG: --cleanup-ipvs="true"
Dec 26 10:06:24 ip-10-2-33-5.ec2.internal kube-proxy[2850]: I1226 10:06:24.978707 2850 flags.go:33] FLAG: --cluster-cidr="10.254.0.0/16"

API versions supported by Kubernetes 1.17.0

# kubectl  api-versions
admissionregistration.k8s.io/v1
admissionregistration.k8s.io/v1beta1
apiextensions.k8s.io/v1
apiextensions.k8s.io/v1beta1
apiregistration.k8s.io/v1
apiregistration.k8s.io/v1beta1
apps/v1
authentication.k8s.io/v1
authentication.k8s.io/v1beta1
authorization.k8s.io/v1
authorization.k8s.io/v1beta1
autoscaling/v1
autoscaling/v2beta1
autoscaling/v2beta2
batch/v1
batch/v1beta1
certificates.k8s.io/v1beta1
coordination.k8s.io/v1
coordination.k8s.io/v1beta1
discovery.k8s.io/v1beta1
events.k8s.io/v1beta1
extensions/v1beta1
networking.k8s.io/v1
networking.k8s.io/v1beta1
node.k8s.io/v1beta1
policy/v1beta1
rbac.authorization.k8s.io/v1
rbac.authorization.k8s.io/v1beta1
scheduling.k8s.io/v1
scheduling.k8s.io/v1beta1
storage.k8s.io/v1
storage.k8s.io/v1beta1
v1

Integrating Harbor with Kubernetes


  • Create a Kubernetes image pull secret
Log in to Harbor on the master node

# docker login -u k8s-btcexa -p 'Blockshine123' harbor.btcexa.com
Login Succeeded
The credentials are saved automatically to ~/.docker/config.json.


# cat /root/.docker/config.json | base64 -w 0
ewoJImF1dGhzIjogewoJCSJoYXJib3IuYnRjZXhhLmNvbSI6IHsKCQkJImF1dGgiOiAiYXpoekxXSjBZMlY0WVRwQ2JHOWphM05vYVc1bE1USXoiCgkJfQoJfSwKCSJIdHRwSGVhZGVycyI6IHsKCQkiVXNlci1BZ2VudCI6ICJEb2NrZXItQ2xpZW50LzE5LjAzLjUgKGxpbnV4KSIKCX0KfQ==

# vim harborsecret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: harborsecret
  namespace: default
data:
  .dockerconfigjson: ewoJImF1dGhzIjogewoJCSJoYXJib3IuYnRjZXhhLmNvbSI6IHsKCQkJImF1dGgiOiAiYXpoekxXSjBZMlY0WVRwQ2JHOWphM05vYVc1bE1USXoiCgkJfQoJfSwKCSJIdHRwSGVhZGVycyI6IHsKCQkiVXNlci1BZ2VudCI6ICJEb2NrZXItQ2xpZW50LzE5LjAzLjUgKGxpbnV4KSIKCX0KfQ==
type: kubernetes.io/dockerconfigjson

# kubectl create -f harborsecret.yaml
# kubectl get secret
NAME TYPE DATA AGE
default-token-m5hwn kubernetes.io/service-account-token 3 48m
harborsecret kubernetes.io/dockerconfigjson 1 6s
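
An equivalent way to create the same pull secret without hand-crafting the base64 payload is kubectl's built-in helper (a sketch; substitute the real Harbor password for the placeholder):
# Alternative: let kubectl build the dockerconfigjson secret
kubectl create secret docker-registry harborsecret \
  --docker-server=harbor.btcexa.com \
  --docker-username=k8s-btcexa \
  --docker-password='<harbor-password>' \
  --namespace=default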
  • Test pod
# cat my-nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
  namespace: default
spec:
  replicas: 4
  selector:
    matchLabels:
      app: my-nginx
  template:
    metadata:
      labels:
        app: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: harbor.btcexa.com/nginx/nginx:latest
        imagePullPolicy: Always
        ports:
        - name: http
          containerPort: 80
      imagePullSecrets:
      - name: harborsecret
---
apiVersion: v1
kind: Service
metadata:
  name: my-nginx-service
  namespace: default
spec:
  selector:
    app: my-nginx
  type: ClusterIP
  ports:
  - name: http
    port: 80
    targetPort: 80
  • Create the pod
# kubectl apply -f my-nginx.yaml
# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 56m
my-nginx-service ClusterIP 10.254.132.201 <none> 80/TCP 109s
# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
my-nginx-79f865b55-phzvq 1/1 Running 0 2m1s 10.10.63.2 10.2.33.65 <none> <none>
my-nginx-79f865b55-pwn7k 1/1 Running 0 2m1s 10.10.40.2 10.2.33.127 <none> <none>

# kubectl describe pod my-nginx-79f865b55-67jt4
....
my-nginx:
Container ID: docker://dba7fd7c3444ebc53919e0aba07ad6d32339eb6b6f1cbffbb4eb2b88a6a2c7ed
Image: harbor.btcexa.com/nginx/nginx:latest
....
....
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled <unknown> default-scheduler Successfully assigned default/my-nginx-79f865b55-phzvq to 10.2.33.65
Normal Pulling 2m18s kubelet, 10.2.33.65 Pulling image "harbor.btcexa.com/nginx/nginx:latest"
Normal Pulled 2m14s kubelet, 10.2.33.65 Successfully pulled image "harbor.btcexa.com/nginx/nginx:latest"
Normal Created 2m13s kubelet, 10.2.33.65 Created container my-nginx
Normal Started 2m13s kubelet, 10.2.33.65 Started container my-nginx
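
Finally, a quick end-to-end check that the Harbor-hosted image actually serves traffic through the ClusterIP service (a verification sketch run from a throwaway busybox pod, following the same pattern as the DNS test above):
# kubectl run -it --rm busybox --image=busybox /bin/sh
/ # wget -qO- http://my-nginx-service | head -n 4
/ # exit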