Environment Preparation

IP              Specs  Hostname    Components
192.168.83.128  2c4g   k8s-master  apiserver, controller-manager, scheduler, etcd
192.168.83.129  2c4g   k8s-node1   kubelet, kube-proxy, docker, flannel, etcd
192.168.83.130  2c4g   k8s-node2   kubelet, kube-proxy, docker, flannel, etcd

Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

Disable SELinux

setenforce 0
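
setenforce 0 only switches SELinux off for the current boot; to disable it permanently, also update the SELinux config file:

# keep SELinux disabled across reboots
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config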

Disable swap

swapoff -a
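
Likewise, swapoff -a only disables swap until the next reboot; to keep it off permanently, also comment out the swap entry in /etc/fstab:

# comment out every swap line so swap stays off across reboots
sed -ri 's/.*swap.*/#&/' /etc/fstab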

Add hostname-to-IP mappings (remember to set the hostname on each machine)

vi /etc/hosts
192.168.83.128 k8s-master
192.168.83.129 k8s-node1
192.168.83.130 k8s-node2
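
If the hostnames have not been set yet, hostnamectl can set them; run the matching command on each machine:

hostnamectl set-hostname k8s-master   # on 192.168.83.128
hostnamectl set-hostname k8s-node1    # on 192.168.83.129
hostnamectl set-hostname k8s-node2    # on 192.168.83.130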

Pass bridged IPv4 traffic to the iptables chains

cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

Apply the configuration

sysctl --system

Synchronize time

cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
yum install ntpdate -y
ntpdate ntp1.aliyun.com
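
ntpdate performs a one-shot sync. Optionally, a cron entry keeps the clocks aligned afterwards:

# re-sync every 30 minutes (optional)
echo '*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com' >> /var/spool/cron/root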

Deploy the Etcd Cluster

cfssl is used to issue the certificates. Download the cfssl tools:

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

Create the certificate signing configuration

vim ca-config.json

{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}

vim ca-csr.json

{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}

vim server-csr.json

# Replace the three IPs below with the IPs of your three machines
{
  "CN": "etcd",
  "hosts": [
    "192.168.83.128",
    "192.168.83.129",
    "192.168.83.130"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}

Generate the certificates

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
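
You can verify that the three machine IPs ended up in the server certificate's SANs with cfssl-certinfo:

cfssl-certinfo -cert server.pem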

Deploy Etcd

Download the binary package: https://github.com/coreos/etcd/releases/tag/v3.2.12

The following steps are the same on all three machines; only the IPs in the etcd configuration need to be changed to the corresponding machine's IP.

Create the etcd directories

# Extract the binary package
tar zxvf etcd-v3.2.12-linux-amd64.tar.gz
mkdir -p /opt/etcd/{bin,cfg,ssl}
cp etcd-v3.2.12-linux-amd64/etcd /opt/etcd/bin/
cp etcd-v3.2.12-linux-amd64/etcdctl /opt/etcd/bin/
# Copy the certificates generated above to /opt/etcd/ssl on all three machines
cp ca*pem server*pem /opt/etcd/ssl
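
If the certificates were generated on the master, one way to distribute them (assuming root SSH access and that /opt/etcd/ssl already exists on the target machines) is:

scp ca*pem server*pem root@192.168.83.129:/opt/etcd/ssl/
scp ca*pem server*pem root@192.168.83.130:/opt/etcd/ssl/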

Create the etcd configuration file. Change the IPs to the current machine's IP, and set ETCD_NAME to etcd01, etcd02, or etcd03 accordingly.

vim /opt/etcd/cfg/etcd

#[Member]
# Node name (must be unique per node: etcd01/etcd02/etcd03)
ETCD_NAME="etcd01"
# Data directory
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
# Peer (cluster communication) listen address
ETCD_LISTEN_PEER_URLS="https://192.168.83.128:2380"
# Client listen address
ETCD_LISTEN_CLIENT_URLS="https://192.168.83.128:2379"

#[Clustering]
# Advertised peer address
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.83.128:2380"
# Advertised client address
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.83.128:2379"
# Cluster member addresses
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.83.128:2380,etcd02=https://192.168.83.129:2380,etcd03=https://192.168.83.130:2380"
# Cluster token
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
# Initial cluster state: "new" for a new cluster, "existing" to join an existing one
ETCD_INITIAL_CLUSTER_STATE="new"

Manage etcd with systemd

vim /usr/lib/systemd/system/etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd
ExecStart=/opt/etcd/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} \
--cert-file=/opt/etcd/ssl/server.pem \
--key-file=/opt/etcd/ssl/server-key.pem \
--peer-cert-file=/opt/etcd/ssl/server.pem \
--peer-key-file=/opt/etcd/ssl/server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Start etcd and enable it at boot

systemctl start etcd
systemctl enable etcd
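
If the start command appears to hang on the first machine, that is expected: etcd waits for enough cluster members to come up, so start etcd on all three machines. The logs can be followed with:

journalctl -u etcd -f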

Check the cluster health

/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://192.168.83.128:2379,https://192.168.83.129:2379,https://192.168.83.130:2379" \
cluster-health

Deploy Docker

Run on the Node machines:

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce -y
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://bc437cce.m.daocloud.io
systemctl start docker
systemctl enable docker

Deploy Flannel

Write the predefined subnet into etcd

/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://192.168.83.128:2379,https://192.168.83.129:2379,https://192.168.83.130:2379" \
set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
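
To verify that the key was written, read it back with the same flags:

/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://192.168.83.128:2379,https://192.168.83.129:2379,https://192.168.83.130:2379" \
get /coreos.com/network/config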

Run all of the following on every Node:

wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
mkdir /opt/kubernetes/{bin,cfg,ssl} -p
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin

Configure flannel

vim /opt/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.83.128:2379,https://192.168.83.129:2379,https://192.168.83.130:2379 -etcd-cafile=/opt/etcd/ssl/ca.pem -etcd-certfile=/opt/etcd/ssl/server.pem -etcd-keyfile=/opt/etcd/ssl/server-key.pem"

Manage flannel with systemd

vim /usr/lib/systemd/system/flanneld.service

[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

Specify the flannel subnet in Docker's startup options

vim /usr/lib/systemd/system/docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

Restart flannel and Docker

systemctl daemon-reload
systemctl restart flanneld
systemctl enable flanneld
systemctl restart docker

Run ip a and make sure docker0 and flannel.1 are in the same subnet.

From the current node, access another Node's docker0 IP to make sure cross-node communication works.
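
For example, if ip a on k8s-node2 shows docker0 at 172.17.33.1 (the subnet is assigned by flannel and will differ in your cluster), run this from k8s-node1:

ping -c 3 172.17.33.1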

Deploy Components on the Master

Generate certificates

Generate the CA certificate

cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

Run:

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

Generate the apiserver certificate

# Replace 192.168.83.128 with the master node's IP
cat > server-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "192.168.83.128",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

Run:

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

Generate the kube-proxy certificate

cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

Run:

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

All of the generated certificates:

ls *pem
ca-key.pem ca.pem kube-proxy-key.pem kube-proxy.pem server-key.pem server.pem

Deploy kube-apiserver

Download the binary package: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.18.md

Only kubernetes-server-linux-amd64.tar.gz needs to be downloaded.

tar zxvf kubernetes-server-linux-amd64.tar.gz
mkdir /opt/kubernetes/{bin,cfg,ssl} -p
cd kubernetes/server/bin/
cp kube-apiserver kube-scheduler kube-controller-manager kubectl /opt/kubernetes/bin

Copy the certificates generated earlier:

cp *pem /opt/kubernetes/ssl

Create the token file

# Fields, in order: random token string, user name, UID, user group
cat > /opt/kubernetes/cfg/token.csv << EOF
674c457d4dcf2eefe4920d7dbb6b0ddc,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
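
The token is just a random hex string; a fresh one can be generated the same way the upstream TLS bootstrapping docs do:

head -c 16 /dev/urandom | od -An -t x | tr -d ' '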

Create the apiserver configuration file

vim /opt/kubernetes/cfg/kube-apiserver

KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.83.128:2379,https://192.168.83.129:2379,https://192.168.83.130:2379 \
--bind-address=192.168.83.128 \
--secure-port=6443 \
--advertise-address=192.168.83.128 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"

Parameter notes:

  • --logtostderr log to standard error
  • --v log verbosity level
  • --etcd-servers etcd cluster endpoints
  • --bind-address listen address
  • --secure-port HTTPS secure port
  • --advertise-address address advertised to cluster members
  • --allow-privileged allow privileged containers
  • --service-cluster-ip-range virtual IP range for Services
  • --authorization-mode authorization modes; enables RBAC and Node authorization
  • --enable-bootstrap-token-auth enable the TLS bootstrap feature
  • --token-auth-file token file
  • --service-node-port-range port range allocated to NodePort Services

Manage kube-apiserver with systemd

vim /usr/lib/systemd/system/kube-apiserver.service

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

Start:

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
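
To quickly confirm the apiserver is up, check that it is listening on the secure port 6443 and the local insecure port 8080 (used by the scheduler and controller-manager below):

ss -lntp | grep kube-apiserver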

Deploy kube-scheduler

vim /opt/kubernetes/cfg/kube-scheduler

KUBE_SCHEDULER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect"

Parameter notes:

  • --master connect to the local apiserver
  • --leader-elect run leader election when multiple instances of this component are started

Manage kube-scheduler with systemd

vim /usr/lib/systemd/system/kube-scheduler.service

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

Start:

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler

Deploy kube-controller-manager

vim /opt/kubernetes/cfg/kube-controller-manager

KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem"

Manage kube-controller-manager with systemd

vim /usr/lib/systemd/system/kube-controller-manager.service

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

Start:

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager

After all components have started successfully, check the component status:

/opt/kubernetes/bin/kubectl get cs
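
With everything healthy, the output should look roughly like this (illustrative):

NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}
etcd-2               Healthy   {"health": "true"}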

Deploy Components on the Nodes

Bind the kubelet-bootstrap user to the system cluster role (run this on the Master):

/opt/kubernetes/bin/kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

Create the kubeconfig files

Run the following commands in the certificate directory on the Master:

# Variables for the kubelet bootstrapping kubeconfig
BOOTSTRAP_TOKEN=674c457d4dcf2eefe4920d7dbb6b0ddc
KUBE_APISERVER="https://192.168.83.128:6443"
# Set cluster parameters
/opt/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=./ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
# Set client authentication parameters
/opt/kubernetes/bin/kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
# Set context parameters
/opt/kubernetes/bin/kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
# Use the default context
/opt/kubernetes/bin/kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Create the kube-proxy kubeconfig file
/opt/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=./ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig

/opt/kubernetes/bin/kubectl config set-credentials kube-proxy \
--client-certificate=./kube-proxy.pem \
--client-key=./kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig

/opt/kubernetes/bin/kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig

/opt/kubernetes/bin/kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

After these commands complete, two files, bootstrap.kubeconfig and kube-proxy.kubeconfig, are generated in the current certificate directory. Copy them to /opt/kubernetes/cfg on each Node.
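
For example, assuming root SSH access from the Master to the Nodes:

scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.83.129:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.83.130:/opt/kubernetes/cfg/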

Deploy kubelet

Copy kubelet and kube-proxy from the server binary package extracted earlier to /opt/kubernetes/bin on each Node.

Run the following on every Node, changing the IPs to match each machine:

Create the kubelet configuration file

vim /opt/kubernetes/cfg/kubelet

KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.83.129 \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet.config \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

Parameter notes:

  • --hostname-override the node name shown in the cluster
  • --kubeconfig kubeconfig path; the file is generated automatically after bootstrap
  • --bootstrap-kubeconfig the bootstrap.kubeconfig file generated earlier
  • --cert-dir directory where issued certificates are stored
  • --pod-infra-container-image image for the pause container that manages the Pod network

Create the kubelet.config file

vim /opt/kubernetes/cfg/kubelet.config

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.83.129
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["10.0.0.2"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true

Manage kubelet with systemd

vim /usr/lib/systemd/system/kubelet.service

[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target

Start:

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet

Approve the Node join requests on the Master

/opt/kubernetes/bin/kubectl get csr
# <CSR_NAME> is the NAME column from the get csr output above
/opt/kubernetes/bin/kubectl certificate approve <CSR_NAME>
/opt/kubernetes/bin/kubectl get node
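
If several requests are pending, one convenient (optional) way to approve them all at once:

/opt/kubernetes/bin/kubectl get csr | grep Pending | awk '{print $1}' | xargs /opt/kubernetes/bin/kubectl certificate approve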

Deploy kube-proxy

vim /opt/kubernetes/cfg/kube-proxy

KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.83.129 \
--cluster-cidr=10.0.0.0/24 \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"

Manage kube-proxy with systemd

vim /usr/lib/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

Start:

systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy

Check the cluster status

/opt/kubernetes/bin/kubectl get cs
/opt/kubernetes/bin/kubectl get node

Test

vim nginx.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  selector:
    app: nginx
  type: NodePort
  ports:
  - port: 80
    targetPort: 80

Apply it:

/opt/kubernetes/bin/kubectl apply -f nginx.yaml

Access nginx through any node IP plus the NodePort.
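
For example, look up the assigned NodePort from the Service (it is allocated from the 30000-50000 range configured on the apiserver) and curl it on a node IP; the port below is illustrative:

/opt/kubernetes/bin/kubectl get svc nginx
# suppose the PORT(S) column shows 80:38696/TCP, then:
curl http://192.168.83.129:38696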

Note:

If you do not want the Master to carry workloads, there is no need to deploy docker, flannel, kubelet, or kube-proxy on it. Otherwise, run the Docker and Flannel deployment steps above on the Master as well, then deploy kubelet and kube-proxy the same way as on the Nodes.

Finally, approve the Master's certificate signing request to join the Master to the cluster.