准备环境
本文所有使用的文件:
链接:https://pan.baidu.com/s/1vtDxA6K2BYT8fZYi8CiuQw 提取码:sgfw
角色
IP
组件
master1
192.168.1.120
kube-apiserver、kube-controller-manager、kube-scheduler、kubelet、kube-proxy、docker、etcd、nginx、keepalived
master2
192.168.1.123
kube-apiserver、kube-controller-manager、kube-scheduler、kubelet、kube-proxy、docker、nginx、keepalived
node1
192.168.1.121
kubelet、kube-proxy、docker、etcd
node2
192.168.1.122
kubelet、kube-proxy、docker、etcd
负载均衡器
192.168.1.124 192.168.1.100(虚拟vip)
初始化配置(master1、master2、node1、node2) # 关闭防火墙 systemctl stop firewalld systemctl disable firewalld # 关闭selinux sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config #永久 setenforce 0 #临时 # 关闭swap swapoff -a #临时 sed -ri 's/.*swap.*/#&/' /etc/fstab #永久 # 设置主机名 hostnamectl set-hostname <hostname> # 添加hosts cat >> /etc/hosts << EOF 192.168.1.120 master1 192.168.1.121 node1 192.168.1.122 node2 192.168.1.123 master2 EOF # 将桥接的IPv4流量传递到iptables的链 cat > /etc/sysctl.d/k8s.conf << EOF net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 EOF sysctl --system #生效配置 # 修改时区并同步时间 cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime yum install ntpdate -y ntpdate ntp1.aliyun.com
部署Etcd集群(master1、node1、node2) 准备cfssl证书工具(master1) mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo mv cfssljson_linux-amd64 /usr/bin/cfssljson mv cfssl_linux-amd64 /usr/bin/cfssl
生成Etcd证书(master1) 自签CA证书 创建证书目录:
mkdir -p ~/TLS/{etcd,k8s} cd ~/TLS/etcd
自签CA:
cat > ca-config.json << EOF { "signing": { "default": { "expiry": "87600h" }, "profiles": { "www": { "expiry": "87600h", "usages": [ "signing", "key encipherment", "server auth", "client auth" ] } } } } EOF
cat > ca-csr.json << EOF { "CN": "etcd CA", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "Beijing", "ST": "Beijing" } ] } EOF
生成证书:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
使用CA签发Etcd HTTPS证书 创建证书申请文件:
cat > server-csr.json << EOF { "CN": "etcd", "hosts": [ "192.168.1.120", "192.168.1.121", "192.168.1.122" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing" } ] } EOF
生成证书:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
部署Etcd集群(master1、node1、node2)
以下操作均在master1上执行,后面将配置拷到node1、node2
创建目录并解压二进制包 tar zxvf etcd-v3.4.9-linux-amd64.tar.gz mkdir /opt/etcd/{bin,cfg,ssl} -p mv etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/
创建etcd配置文件 cat > /opt/etcd/cfg/etcd.conf << EOF #[Member] # 节点名称 ETCD_NAME="etcd-1" # 数据目录 ETCD_DATA_DIR="/var/lib/etcd/default.etcd" # 集群通信监听地址 ETCD_LISTEN_PEER_URLS="https://192.168.1.120:2380" # 客户端访问监听地址 ETCD_LISTEN_CLIENT_URLS="https://192.168.1.120:2379" #[Clustering] # 集群通告地址 ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.120:2380" # 客户端通告地址 ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.120:2379" # 集群节点地址 ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.1.120:2380,etcd-2=https://192.168.1.121:2380,etcd-3=https://192.168.1.122:2380" # 集群token ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" # 加入集群的当前状态,new表示新集群,existing表示加入已有集群 ETCD_INITIAL_CLUSTER_STATE="new" EOF
systemd管理etcd cat > /usr/lib/systemd/system/etcd.service << EOF [Unit] Description=Etcd Server After=network.target After=network-online.target Wants=network-online.target [Service] Type=notify EnvironmentFile=/opt/etcd/cfg/etcd.conf ExecStart=/opt/etcd/bin/etcd \ --cert-file=/opt/etcd/ssl/server.pem \ --key-file=/opt/etcd/ssl/server-key.pem \ --peer-cert-file=/opt/etcd/ssl/server.pem \ --peer-key-file=/opt/etcd/ssl/server-key.pem \ --trusted-ca-file=/opt/etcd/ssl/ca.pem \ --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \ --logger=zap Restart=on-failure LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
拷贝证书到相应目录 cp ~/TLS/etcd/ca*pem ~/TLS/etcd/server*pem /opt/etcd/ssl/
将master1所有文件拷贝到node1、node2 scp -r /opt/etcd/ root@node1:/opt scp /usr/lib/systemd/system/etcd.service root@node1:/usr/lib/systemd/system/ scp -r /opt/etcd/ root@node2:/opt scp /usr/lib/systemd/system/etcd.service root@node2:/usr/lib/systemd/system/
修改node1、node2上的etcd.conf配置文件中的相关配置 #[Member] # 节点名称 ETCD_NAME="etcd-1" #修改此处节点名称 node1改为etcd-2 node2改为etcd-3 # 数据目录 ETCD_DATA_DIR="/var/lib/etcd/default.etcd" # 集群通信监听地址 ETCD_LISTEN_PEER_URLS="https://192.168.1.120:2380" #修改此处为当前服务器地址 # 客户端访问监听地址 ETCD_LISTEN_CLIENT_URLS="https://192.168.1.120:2379" #修改此处为当前服务器地址 #[Clustering] # 集群通告地址 ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.120:2380" #修改此处为当前服务器地址 # 客户端通告地址 ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.120:2379" #修改此处为当前服务器地址 # 集群节点地址 ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.1.120:2380,etcd-2=https://192.168.1.121:2380,etcd-3=https://192.168.1.122:2380" # 集群token ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" # 加入集群的当前状态,new表示新集群,existing表示加入已有集群 ETCD_INITIAL_CLUSTER_STATE="new"
启动并设置开机自启 systemctl daemon-reload systemctl start etcd systemctl enable etcd
查看集群状态 ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.1.120:2379,https://192.168.1.121:2379,https://192.168.1.122:2379" endpoint health --write-out=table
安装docker(master1、master2、node1、node2) yum install -y yum-utils device-mapper-persistent-data lvm2 yum-config-manager \ --add-repo \ https://download.docker.com/linux/centos/docker-ce.repo yum install docker-ce -y curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://bc437cce.m.daocloud.io systemctl start docker systemctl enable docker
部署master1(master1) 生成kube-apiserver证书 自签CA证书 进入工作目录: cd ~/TLS/k8s
创建申请文件:
cat > ca-config.json << EOF { "signing": { "default": { "expiry": "87600h" }, "profiles": { "kubernetes": { "expiry": "87600h", "usages": [ "signing", "key encipherment", "server auth", "client auth" ] } } } } EOF
cat > ca-csr.json << EOF { "CN": "kubernetes", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "Beijing", "ST": "Beijing", "O": "k8s", "OU": "System" } ] } EOF
生成证书:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
使用CA签发kube-apiserver HTTPS证书 创建证书申请文件:
hosts字段配置所有机器IP,包括虚拟vip,可以再预留多个IP,方便后期扩容
cat > server-csr.json << EOF { "CN": "kubernetes", "hosts": [ "10.0.0.1", "127.0.0.1", "192.168.1.120", "192.168.1.121", "192.168.1.122", "192.168.1.123", "192.168.1.124", "192.168.1.100", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing", "O": "k8s", "OU": "System" } ] } EOF
生成证书:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
解压二进制包 mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs} tar zxvf kubernetes-server-linux-amd64.tar.gz cd kubernetes/server/bin cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin cp kubectl /usr/bin/
部署kube-apiserver 创建配置文件 cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF KUBE_APISERVER_OPTS="--logtostderr=false \\ --v=2 \\ --log-dir=/opt/kubernetes/logs \\ --etcd-servers=https://192.168.1.120:2379,https://192.168.1.121:2379,https://192.168.1.122:2379 \\ --bind-address=192.168.1.120 \\ --secure-port=6443 \\ --advertise-address=192.168.1.120 \\ --allow-privileged=true \\ --service-cluster-ip-range=10.0.0.0/24 \\ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\ --authorization-mode=RBAC,Node \\ --enable-bootstrap-token-auth=true \\ --token-auth-file=/opt/kubernetes/cfg/token.csv \\ --service-node-port-range=30000-32767 \\ --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\ --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\ --tls-cert-file=/opt/kubernetes/ssl/server.pem \\ --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\ --client-ca-file=/opt/kubernetes/ssl/ca.pem \\ --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\ --service-account-issuer=api \\ --service-account-signing-key-file=/opt/kubernetes/ssl/server-key.pem \\ --etcd-cafile=/opt/etcd/ssl/ca.pem \\ --etcd-certfile=/opt/etcd/ssl/server.pem \\ --etcd-keyfile=/opt/etcd/ssl/server-key.pem \\ --requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem \\ --proxy-client-cert-file=/opt/kubernetes/ssl/server.pem \\ --proxy-client-key-file=/opt/kubernetes/ssl/server-key.pem \\ --requestheader-allowed-names=kubernetes \\ --requestheader-extra-headers-prefix=X-Remote-Extra- \\ --requestheader-group-headers=X-Remote-Group \\ --requestheader-username-headers=X-Remote-User \\ --enable-aggregator-routing=true \\ --audit-log-maxage=30 \\ --audit-log-maxbackup=3 \\ --audit-log-maxsize=100 \\ --audit-log-path=/opt/kubernetes/logs/k8s-audit.log" EOF
• –logtostderr:启用日志
• –v:日志等级
• –log-dir:日志目录
• –etcd-servers:etcd集群地址
• –bind-address:监听地址
• –secure-port:https安全端口
• –advertise-address:集群通告地址
• –allow-privileged:启用授权
• –service-cluster-ip-range:Service虚拟IP地址段
• –enable-admission-plugins:准入控制模块
• –authorization-mode:认证授权,启用RBAC授权和节点自管理
• –enable-bootstrap-token-auth:启用TLS bootstrap机制
• –token-auth-file:bootstrap token文件
• –service-node-port-range:Service nodeport类型默认分配端口范围
• –kubelet-client-xxx:apiserver访问kubelet客户端证书
• –tls-xxx-file:apiserver https证书
• 1.20版本必须加的参数:–service-account-issuer,–service-account-signing-key-file
• –etcd-xxxfile:连接Etcd集群证书
• –audit-log-xxx:审计日志
• 启动聚合层相关配置:–requestheader-client-ca-file,–proxy-client-cert-file,–proxy-client-key-file,–requestheader-allowed-names,–requestheader-extra-headers-prefix,–requestheader-group-headers,–requestheader-username-headers,–enable-aggregator-routing
拷贝刚刚生成的证书到指定目录 cp ~/TLS/k8s/ca*pem ~/TLS/k8s/server*pem /opt/kubernetes/ssl/
启动TLS Bootstrapping机制 生成token:
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
写入文件:
cat > /opt/kubernetes/cfg/token.csv << EOF 16e183444ad387caf9ad5f0fe6b83ea5,kubelet-bootstrap,10001,"system:node-bootstrapper" EOF
systemd管理apiserver cat > /usr/lib/systemd/system/kube-apiserver.service << EOF [Unit] Description=Kubernetes API Server Documentation=https://github.com/kubernetes/kubernetes [Service] EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS Restart=on-failure [Install] WantedBy=multi-user.target EOF
启动并设置开机自启 systemctl daemon-reload systemctl start kube-apiserver systemctl enable kube-apiserver
部署kube-controller-manager 创建配置文件 cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\ --v=2 \\ --log-dir=/opt/kubernetes/logs \\ --leader-elect=true \\ --kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \\ --bind-address=127.0.0.1 \\ --allocate-node-cidrs=true \\ --cluster-cidr=10.244.0.0/16 \\ --service-cluster-ip-range=10.0.0.0/24 \\ --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\ --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\ --root-ca-file=/opt/kubernetes/ssl/ca.pem \\ --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\ --cluster-signing-duration=87600h0m0s" EOF
• –kubeconfig:连接apiserver配置文件
• –leader-elect:当该组件启动多个时,自动选举(HA)
• –cluster-signing-cert-file/–cluster-signing-key-file:自动为kubelet颁发证书的CA,与apiserver保持一致
生成kubeconfig文件 切换工作目录: cd ~/TLS/k8s
创建证书请求文件:
cat > kube-controller-manager-csr.json << EOF { "CN": "system:kube-controller-manager", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing", "O": "system:masters", "OU": "System" } ] } EOF
生成证书:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
生成kubeconfig文件:
KUBE_CONFIG="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig" KUBE_APISERVER="https://192.168.1.120:6443" kubectl config set-cluster kubernetes \ --certificate-authority=/opt/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=${KUBE_APISERVER} \ --kubeconfig=${KUBE_CONFIG} kubectl config set-credentials kube-controller-manager \ --client-certificate=./kube-controller-manager.pem \ --client-key=./kube-controller-manager-key.pem \ --embed-certs=true \ --kubeconfig=${KUBE_CONFIG} kubectl config set-context default \ --cluster=kubernetes \ --user=kube-controller-manager \ --kubeconfig=${KUBE_CONFIG} kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
systemd管理controller-manager cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF [Unit] Description=Kubernetes Controller Manager Documentation=https://github.com/kubernetes/kubernetes [Service] EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS Restart=on-failure [Install] WantedBy=multi-user.target EOF
启动并设置开机自启 systemctl daemon-reload systemctl start kube-controller-manager systemctl enable kube-controller-manager
部署kube-scheduler 创建配置文件 cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF KUBE_SCHEDULER_OPTS="--logtostderr=false \\ --v=2 \\ --log-dir=/opt/kubernetes/logs \\ --leader-elect \\ --kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \\ --bind-address=127.0.0.1" EOF
• –kubeconfig:连接apiserver配置文件
• –leader-elect:当该组件启动多个时,自动选举(HA)
生成kubeconfig文件 切换工作目录: cd ~/TLS/k8s
创建证书请求文件:
cat > kube-scheduler-csr.json << EOF { "CN": "system:kube-scheduler", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing", "O": "system:masters", "OU": "System" } ] } EOF
生成证书:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
生成kubeconfig文件:
KUBE_CONFIG="/opt/kubernetes/cfg/kube-scheduler.kubeconfig" KUBE_APISERVER="https://192.168.1.120:6443" kubectl config set-cluster kubernetes \ --certificate-authority=/opt/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=${KUBE_APISERVER} \ --kubeconfig=${KUBE_CONFIG} kubectl config set-credentials kube-scheduler \ --client-certificate=./kube-scheduler.pem \ --client-key=./kube-scheduler-key.pem \ --embed-certs=true \ --kubeconfig=${KUBE_CONFIG} kubectl config set-context default \ --cluster=kubernetes \ --user=kube-scheduler \ --kubeconfig=${KUBE_CONFIG} kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
systemd管理scheduler cat > /usr/lib/systemd/system/kube-scheduler.service << EOF [Unit] Description=Kubernetes Scheduler Documentation=https://github.com/kubernetes/kubernetes [Service] EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS Restart=on-failure [Install] WantedBy=multi-user.target EOF
启动并设置开机启动 systemctl daemon-reload systemctl start kube-scheduler systemctl enable kube-scheduler
查看集群状态 生成kubectl连接集群的证书:
cat > admin-csr.json <<EOF { "CN": "admin", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing", "O": "system:masters", "OU": "System" } ] } EOF
生成证书:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
生成kubeconfig文件:
mkdir /root/.kube KUBE_CONFIG="/root/.kube/config" KUBE_APISERVER="https://192.168.1.120:6443" kubectl config set-cluster kubernetes \ --certificate-authority=/opt/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=${KUBE_APISERVER} \ --kubeconfig=${KUBE_CONFIG} kubectl config set-credentials cluster-admin \ --client-certificate=./admin.pem \ --client-key=./admin-key.pem \ --embed-certs=true \ --kubeconfig=${KUBE_CONFIG} kubectl config set-context default \ --cluster=kubernetes \ --user=cluster-admin \ --kubeconfig=${KUBE_CONFIG} kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
通过kubectl查看集群状态:
授权kubelet-bootstrap用户允许请求证书 kubectl create clusterrolebinding kubelet-bootstrap \ --clusterrole=system:node-bootstrapper \ --user=kubelet-bootstrap
部署node(master1、node1、node2)
这里master1也作为worker节点,我们先只对master1操作,后面拷贝到node1和node2
创建工作目录和准备二进制文件 创建工作目录:
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
给master1、node1、node2准备二进制文件:
cd kubernetes/server/bin/ #即之前解压kubernetes-server-linux-amd64.tar.gz后得到的目录 cp kubelet kube-proxy /opt/kubernetes/bin scp kubelet kube-proxy root@node1:/opt/kubernetes/bin scp kubelet kube-proxy root@node2:/opt/kubernetes/bin
部署kubelet 创建配置文件
注意修改主机名
cat > /opt/kubernetes/cfg/kubelet.conf << EOF KUBELET_OPTS="--logtostderr=false \\ --v=2 \\ --log-dir=/opt/kubernetes/logs \\ --hostname-override=master1 \\ --network-plugin=cni \\ --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\ --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\ --config=/opt/kubernetes/cfg/kubelet-config.yml \\ --cert-dir=/opt/kubernetes/ssl \\ --pod-infra-container-image=lizhenliang/pause-amd64:3.0" EOF
• –hostname-override:显示名称,集群中唯一
• –network-plugin:启用CNI
• –kubeconfig:空路径,会自动生成,后面用于连接apiserver
• –bootstrap-kubeconfig:首次启动向apiserver申请证书
• –config:配置参数文件
• –cert-dir:kubelet证书生成目录
• –pod-infra-container-image:管理Pod网络容器的镜像
配置参数文件 cat > /opt/kubernetes/cfg/kubelet-config.yml << EOF kind: KubeletConfiguration apiVersion: kubelet.config.k8s.io/v1beta1 address: 0.0.0.0 port: 10250 readOnlyPort: 10255 cgroupDriver: cgroupfs clusterDNS: - 10.0.0.2 clusterDomain: cluster.local failSwapOn: false authentication: anonymous: enabled: false webhook: cacheTTL: 2m0s enabled: true x509: clientCAFile: /opt/kubernetes/ssl/ca.pem authorization: mode: Webhook webhook: cacheAuthorizedTTL: 5m0s cacheUnauthorizedTTL: 30s evictionHard: imagefs.available: 15% memory.available: 100Mi nodefs.available: 10% nodefs.inodesFree: 5% maxOpenFiles: 1000000 maxPods: 110 EOF
生成kubelet初次加入集群引导kubeconfig文件 KUBE_CONFIG="/opt/kubernetes/cfg/bootstrap.kubeconfig" KUBE_APISERVER="https://192.168.1.120:6443" # apiserver IP:PORT TOKEN="16e183444ad387caf9ad5f0fe6b83ea5" # 与token.csv里保持一致 # 生成 kubelet bootstrap kubeconfig 配置文件 kubectl config set-cluster kubernetes \ --certificate-authority=/opt/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=${KUBE_APISERVER} \ --kubeconfig=${KUBE_CONFIG} kubectl config set-credentials "kubelet-bootstrap" \ --token=${TOKEN} \ --kubeconfig=${KUBE_CONFIG} kubectl config set-context default \ --cluster=kubernetes \ --user="kubelet-bootstrap" \ --kubeconfig=${KUBE_CONFIG} kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
systemd管理kubelet cat > /usr/lib/systemd/system/kubelet.service << EOF [Unit] Description=Kubernetes Kubelet After=docker.service [Service] EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS Restart=on-failure LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
启动并设置开机启动
systemctl daemon-reload systemctl start kubelet systemctl enable kubelet
批准kubelet证书申请并加入集群
kubectl get csr kubectl certificate approve node-csr-cpZHvIc7wzZb_6Vln-ab9WF2rovEBJRiUlIiARWTJ_g
部署kube-proxy 创建配置文件 cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF KUBE_PROXY_OPTS="--logtostderr=false \\ --v=2 \\ --log-dir=/opt/kubernetes/logs \\ --config=/opt/kubernetes/cfg/kube-proxy-config.yml" EOF
配置参数文件 cat > /opt/kubernetes/cfg/kube-proxy-config.yml << EOF kind: KubeProxyConfiguration apiVersion: kubeproxy.config.k8s.io/v1alpha1 bindAddress: 0.0.0.0 metricsBindAddress: 0.0.0.0:10249 clientConnection: kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig hostnameOverride: master1 clusterCIDR: 10.0.0.0/24 EOF
生成kube-proxy.kubeconfig文件 切换工作目录: cd ~/TLS/k8s
创建证书请求文件:
cat > kube-proxy-csr.json << EOF { "CN": "system:kube-proxy", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing", "O": "k8s", "OU": "System" } ] } EOF
生成证书:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
生成kubeconfig文件:
KUBE_CONFIG="/opt/kubernetes/cfg/kube-proxy.kubeconfig" KUBE_APISERVER="https://192.168.1.120:6443" kubectl config set-cluster kubernetes \ --certificate-authority=/opt/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=${KUBE_APISERVER} \ --kubeconfig=${KUBE_CONFIG} kubectl config set-credentials kube-proxy \ --client-certificate=./kube-proxy.pem \ --client-key=./kube-proxy-key.pem \ --embed-certs=true \ --kubeconfig=${KUBE_CONFIG} kubectl config set-context default \ --cluster=kubernetes \ --user=kube-proxy \ --kubeconfig=${KUBE_CONFIG} kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
systemd管理kube-proxy cat > /usr/lib/systemd/system/kube-proxy.service << EOF [Unit] Description=Kubernetes Proxy After=network.target [Service] EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS Restart=on-failure LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
启动并设置开机启动 systemctl daemon-reload systemctl start kube-proxy systemctl enable kube-proxy
部署网络组件 kubectl apply -f calico.yaml kubectl get pods -n kube-system
授权apiserver访问kubelet cat > apiserver-to-kubelet-rbac.yaml << EOF apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: annotations: rbac.authorization.kubernetes.io/autoupdate: "true" labels: kubernetes.io/bootstrapping: rbac-defaults name: system:kube-apiserver-to-kubelet rules: - apiGroups: - "" resources: - nodes/proxy - nodes/stats - nodes/log - nodes/spec - nodes/metrics - pods/log verbs: - "*" --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: system:kube-apiserver namespace: "" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:kube-apiserver-to-kubelet subjects: - apiGroup: rbac.authorization.k8s.io kind: User name: kubernetes EOF kubectl apply -f apiserver-to-kubelet-rbac.yaml
新增node1和node2节点 拷贝master1上的文件到node1和node2节点: scp -r /opt/kubernetes root@node1:/opt/ scp -r /opt/kubernetes root@node2:/opt/ scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@node1:/usr/lib/systemd/system scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@node2:/usr/lib/systemd/system scp /opt/kubernetes/ssl/ca.pem root@node1:/opt/kubernetes/ssl scp /opt/kubernetes/ssl/ca.pem root@node2:/opt/kubernetes/ssl
删除node1和node2的kubelet证书和kubeconfig文件: rm -f /opt/kubernetes/cfg/kubelet.kubeconfig rm -f /opt/kubernetes/ssl/kubelet*
修改主机名
vim /opt/kubernetes/cfg/kubelet.conf
--hostname-override=node1 #node2节点改为node2
vim /opt/kubernetes/cfg/kube-proxy-config.yml
hostnameOverride: node1 #node2节点改为node2
启动并设置开机自启 systemctl daemon-reload systemctl start kubelet kube-proxy systemctl enable kubelet kube-proxy
在Master上批准新Node kubelet证书申请 kubectl get csr kubectl certificate approve node-csr-17xyMtSOCXukCJKwTxgf22j4UBbNB5Q6WtCBAV_BWbs kubectl certificate approve node-csr-fxT7uV4zvIh1RJVQ7K6E-hLSS47GmBW5VBFBOrKzVRI
部署CoreDNS kubectl apply -f coredns.yaml
新增master2(master2) 创建Etcd证书目录(master2操作,否则后面拷贝etcd证书会失败) mkdir -p /opt/etcd
拷贝文件(master1操作) scp -r /opt/kubernetes root@master2:/opt scp -r /opt/etcd/ssl root@master2:/opt/etcd scp /usr/lib/systemd/system/kube* root@master2:/usr/lib/systemd/system scp /usr/bin/kubectl root@master2:/usr/bin scp -r ~/.kube root@master2:~
删除证书文件 rm -f /opt/kubernetes/cfg/kubelet.kubeconfig rm -f /opt/kubernetes/ssl/kubelet*
修改配置文件
vim /opt/kubernetes/cfg/kube-apiserver.conf
... --bind-address=192.168.1.123 \ --advertise-address=192.168.1.123 \ ...
vim /opt/kubernetes/cfg/kube-controller-manager.kubeconfig
... server: https://192.168.1.123:6443 ...
vim /opt/kubernetes/cfg/kube-scheduler.kubeconfig
... server: https://192.168.1.123:6443 ...
vim /opt/kubernetes/cfg/kubelet.conf
... --hostname-override=master2 ...
vim /opt/kubernetes/cfg/kube-proxy-config.yml
... hostnameOverride: master2 ...
vim ~/.kube/config
... server: https://192.168.1.123:6443 ...
启动设置开机启动 systemctl daemon-reload systemctl start kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy systemctl enable kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy
查看集群组件状态
批准kubelet证书申请 kubectl get csr kubectl certificate approve node-csr-h5nTCqbICGiH_YYIsbuemNpA6PNyGy19hKdilgjkWkM # 查看node kubectl get node
部署Nginx+Keepalived高可用 安装nginx和keepalived yum install epel-release -y yum install nginx keepalived -y
Nginx配置文件(主备一样) user nginx; worker_processes auto; error_log /var/log/nginx/error.log; pid /run/nginx.pid; include /usr/share/nginx/modules/*.conf; events { worker_connections 1024; } # 四层负载均衡,为两台Master apiserver组件提供负载均衡 stream { log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent'; access_log /var/log/nginx/k8s-access.log main; upstream k8s-apiserver { server 192.168.1.120:6443; # Master1 APISERVER IP:PORT server 192.168.1.123:6443; # Master2 APISERVER IP:PORT } server { listen 6443; proxy_pass k8s-apiserver; } } http { log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log main; sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048; include /etc/nginx/mime.types; default_type application/octet-stream; server { listen 80 default_server; server_name _; location / { } } }
Keepalived配置(Master) global_defs { notification_email { acassen@firewall.loc failover@firewall.loc sysadmin@firewall.loc } notification_email_from Alexandre.Cassen@firewall.loc smtp_server 127.0.0.1 smtp_connect_timeout 30 router_id NGINX_MASTER } vrrp_script check_nginx { script "/etc/keepalived/check_nginx.sh" } vrrp_instance VI_1 { state MASTER interface eth1 virtual_router_id 51 priority 100 advert_int 1 authentication { auth_type PASS auth_pass 1111 } virtual_ipaddress { 192.168.1.100/24 } track_script { check_nginx } }
Keepalived配置(Backup) global_defs { notification_email { acassen@firewall.loc failover@firewall.loc sysadmin@firewall.loc } notification_email_from Alexandre.Cassen@firewall.loc smtp_server 127.0.0.1 smtp_connect_timeout 30 router_id NGINX_BACKUP } vrrp_script check_nginx { script "/etc/keepalived/check_nginx.sh" } vrrp_instance VI_1 { state BACKUP interface eth1 virtual_router_id 51 priority 90 advert_int 1 authentication { auth_type PASS auth_pass 1111 } # 虚拟IP virtual_ipaddress { 192.168.1.100/24 } track_script { check_nginx } }
心跳检测脚本(主备都准备) cat > /etc/keepalived/check_nginx.sh << "EOF" #!/bin/bash count=$(ss -antp |grep 6443 |egrep -cv "grep|$$") if [ "$count" -eq 0 ];then exit 1 else exit 0 fi EOF chmod +x /etc/keepalived/check_nginx.sh
启动并设置开机启动 systemctl start nginx keepalived systemctl enable nginx keepalived
查看虚拟VIP是否生成 [root@nginx1 ~]# ip address 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 link/ether 52:54:00:4d:77:d3 brd ff:ff:ff:ff:ff:ff inet 10.0.2.15/24 brd 10.0.2.255 scope global noprefixroute dynamic eth0 valid_lft 84121sec preferred_lft 84121sec inet6 fe80::5054:ff:fe4d:77d3/64 scope link valid_lft forever preferred_lft forever 3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 link/ether 08:00:27:7e:84:7a brd ff:ff:ff:ff:ff:ff inet 192.168.1.124/24 brd 192.168.1.255 scope global noprefixroute eth1 valid_lft forever preferred_lft forever inet 192.168.1.100/24 scope global secondary eth1 valid_lft forever preferred_lft forever inet6 fe80::a00:27ff:fe7e:847a/64 scope link valid_lft forever preferred_lft forever
测试虚拟VIP是否可以漂移 关闭Nginx Master,在Nginx Backup查看虚拟vip
[root@nginx2 ~]# ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 link/ether 52:54:00:4d:77:d3 brd ff:ff:ff:ff:ff:ff inet 10.0.2.15/24 brd 10.0.2.255 scope global noprefixroute dynamic eth0 valid_lft 83903sec preferred_lft 83903sec inet6 fe80::5054:ff:fe4d:77d3/64 scope link valid_lft forever preferred_lft forever 3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 link/ether 08:00:27:a7:3b:77 brd ff:ff:ff:ff:ff:ff inet 192.168.1.125/24 brd 192.168.1.255 scope global noprefixroute eth1 valid_lft forever preferred_lft forever inet 192.168.1.100/24 scope global secondary eth1 valid_lft forever preferred_lft forever inet6 fe80::a00:27ff:fea7:3b77/64 scope link valid_lft forever preferred_lft forever
通过虚拟vip访问 curl -k https://192.168.1.100:6443/version
修改所有k8s node节点连接LoadBalance的VIP 之前我们所有node节点连接的apiserver都是单点的,现在需要改成通过连接VIP走nginx来访问apiserver
sed -i 's#192.168.1.120:6443#192.168.1.100:6443#' /opt/kubernetes/cfg/* systemctl restart kubelet kube-proxy
最后检查节点状态