I. Preparation Before Deployment

1.1 Understand the Kubernetes Release Cadence

Kubernetes currently releases three minor versions per year, and the project maintains the three most recent minor releases. Whenever possible, choose a version that is still within its maintenance window.

Kubernetes releases:
https://kubernetes.io/zh-cn/releases/

The source code is hosted on GitHub:
https://github.com/kubernetes/kubernetes

1.2 Obtain the Kubernetes and Docker Packages

This guide installs the k8s cluster with kubeadm. On Ubuntu and CentOS, the packages can be installed from apt/yum repositories.

You therefore need both a Docker repository and a Kubernetes repository, and you must specify exact versions at install time; otherwise the latest versions are installed by default, which can leave the k8s and Docker versions incompatible with each other.

1.2.1 Docker Repository

Aliyun apt repository

sudo apt update
sudo apt -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
sudo apt update -y
#Add the Aliyun apt repository

Aliyun yum repository

sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
sudo yum makecache fast
#Add the Aliyun yum repository

1.2.2 Kubernetes Repository

Aliyun apt repository

apt update && apt install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add - 
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt update
#Add the Aliyun apt repository

Aliyun yum repository

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

1.3 Operating System Initialization

#Set the hostname for each node
hostnamectl set-hostname k8s-m-01

#Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

#Add kernel parameters
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.ipv4.tcp_tw_reuse = 0
net.core.somaxconn = 32768
net.netfilter.nf_conntrack_max=1000000
vm.swappiness = 0
vm.max_map_count=655350
fs.file-max=6553500
EOF
sysctl --system
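
The net.bridge.* keys above only exist once the br_netfilter kernel module is loaded, so sysctl --system may fail on a fresh boot. A small addition, following the standard kubeadm prerequisites, loads the modules now and at every boot:

#Load the required kernel modules
cat > /etc/modules-load.d/k8s.conf << EOF
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter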

#User process limits
vim /etc/security/limits.conf
*            soft    core        unlimited
*            hard    core        unlimited
*            soft    nproc        1000000
*            hard    nproc        1000000
*            soft    nofile        1000000
*            hard    nofile        1000000
*            soft    memlock        32000
*            hard    memlock        32000
*            soft    msgqueue    8192000
*            hard    msgqueue    8192000

root            soft    core        unlimited
root            hard    core        unlimited
root            soft    nproc        1000000
root            hard    nproc        1000000
root            soft    nofile        1000000
root            hard    nofile        1000000
root            soft    memlock        32000
root            hard    memlock        32000
root            soft    msgqueue    8192000
root            hard    msgqueue    8192000
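
These limits take effect from the next login session; a quick check after logging back in:

ulimit -n
ulimit -u
#Both should print 1000000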

#Set the timezone to Asia/Shanghai
timedatectl set-timezone 'Asia/Shanghai'
chronyc makestep

1.3.1 Extra Configuration for CentOS
#Stop and disable the firewall
systemctl stop firewalld && systemctl disable firewalld

#Disable SELinux
sed -i "s/SELINUX=enforcing/SELINUX=disabled/" /etc/selinux/config 
setenforce 0

II. Deploying Docker

2.1 Install Docker

apt

apt-cache madison docker-ce
#List the versions available in the repository

sudo apt update
sudo apt install docker-ce=5:20.10.9~3-0~ubuntu-focal
#Install the specified Docker version

yum

yum list docker-ce.x86_64 --showduplicates | sort -r
#List the versions available in the repository

sudo yum install docker-ce-20.10.9-3.el8
sudo systemctl enable --now docker
#Install the specified Docker version

2.2 Configure Registry Mirrors and the Container Runtime's Cgroup Driver

On Linux, control groups (cgroups) are used to limit the resources allocated to processes.

Both the kubelet and the underlying container runtime need to interface with cgroups to enforce resource management for Pods and containers and to set requests and limits for resources such as CPU and memory. To do so, the kubelet and the container runtime each need a cgroup driver. The critical point is that the kubelet and the container runtime must use the same cgroup driver with the same configuration.

Two cgroup drivers are available: cgroupfs and systemd.

The official documentation explains this here:

https://kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/#systemd-cgroup-driver

Here, the systemd driver must be used.

cat > /etc/docker/daemon.json << EOF
{
  "exec-opts":["native.cgroupdriver=systemd"],
  "registry-mirrors":["https://kn0t2bca.mirror.aliyuncs.com","https://harbor.paper1.cc"],
  "insecure-registries":["kn0t2bca.mirror.aliyuncs.com","harbor.paper1.cc"]
}
EOF

systemctl restart docker
#Restart Docker to apply the configuration
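
To confirm that Docker picked up the systemd driver:

docker info --format '{{.CgroupDriver}}'
#Should print: systemd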

2.3 Install cri-dockerd (required for Kubernetes v1.24 and later)

Kubernetes talks to container runtimes through the CRI (Container Runtime Interface) plugin interface, and Docker does not implement the CRI specification. Kubernetes therefore shipped a built-in shim, dockershim, that mediated between the CRI and Docker. dockershim was removed in Kubernetes 1.24, so cri-dockerd must be installed as its replacement to provide the same functionality.

cri-dockerd's official GitHub page:
https://github.com/Mirantis/cri-dockerd

2.3.1 Installation via Package Manager

rpm -ivh cri-dockerd-0.3.1-3.el7.x86_64.rpm
#Install from the el7 rpm package

dpkg -i cri-dockerd_0.3.1.3-0.ubuntu-focal_amd64.deb
#Install from the Ubuntu 20.04 deb package
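
Confirm the installation:

cri-dockerd --version
#Prints the installed cri-dockerd version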

2.3.2 Installation from the Binary Tarball

# Kubernetes 1.24 and later no longer support Docker directly, so install cri-dockerd
# Download cri-dockerd
wget  https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.5/cri-dockerd-0.2.5.amd64.tgz

# Unpack cri-dockerd
tar xvf cri-dockerd-0.2.5.amd64.tgz 
cp cri-dockerd/cri-dockerd  /usr/bin/

# Write the systemd service unit
cat >  /usr/lib/systemd/system/cri-docker.service <<EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

StartLimitBurst=3

StartLimitInterval=60s

LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

# Write the socket unit
cat > /usr/lib/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF

# Reload systemd and start cri-dockerd
systemctl daemon-reload ; systemctl enable cri-docker --now
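
Confirm the service is active and the CRI socket exists:

systemctl is-active cri-docker
ls -l /run/cri-dockerd.sock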

III. Deploying k8s

3.1 Install kubeadm, kubectl, and kubelet

Install these on every node; worker nodes can skip kubectl.

apt

apt-cache madison kubeadm
#List the versions available in the repository

sudo apt update
apt install kubelet=1.23.12-00 kubeadm=1.23.12-00 kubectl=1.23.12-00
#Install the specified versions of kubeadm, kubelet, and kubectl

yum

yum list kubeadm --showduplicates
#List the versions available in the repository

yum install kubelet-1.23.12-0 kubeadm-1.23.12-0 kubectl-1.23.12-0
systemctl enable --now kubelet
#Install the specified versions of kubeadm, kubelet, and kubectl
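
To keep a routine package upgrade from unexpectedly moving the cluster to a newer version, the packages can be pinned on apt-based systems (on yum, the versionlock plugin provides the equivalent):

sudo apt-mark hold kubelet kubeadm kubectl
#Run sudo apt-mark unhold kubelet kubeadm kubectl before a planned upgrade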

3.2 Enable kubeadm Command Auto-Completion (optional)

mkdir /data/scripts -p
kubeadm completion bash > /data/scripts/kubeadm_completion.sh
source /data/scripts/kubeadm_completion.sh
echo "source /data/scripts/kubeadm_completion.sh"  >> /etc/profile
#Enable kubeadm auto-completion
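
kubectl supports the same completion mechanism and can be enabled the same way:

kubectl completion bash > /data/scripts/kubectl_completion.sh
source /data/scripts/kubectl_completion.sh
echo "source /data/scripts/kubectl_completion.sh" >> /etc/profile
#Enable kubectl auto-completion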

3.3 Integrate kubelet with cri-dockerd (Kubernetes v1.24 and later)

3.3.1 Point cri-dockerd at the CNI Plugins

vim /usr/lib/systemd/system/cri-docker.service

ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-cache-dir=/var/lib/cni/cache --cni-conf-dir=/etc/cni/net.d

systemctl daemon-reload
systemctl restart cri-docker
#Reload the systemd configuration and restart cri-dockerd

3.3.2 Connect kubelet to cri-dockerd

mkdir /etc/sysconfig -p
#Create the configuration directory
vim /etc/sysconfig/kubelet

KUBELET_KUBEADM_ARGS="--container-runtime=remote --container-runtime-endpoint=/run/cri-dockerd.sock"

#Set the kubelet argument variable

3.3.3 kubelet Service Configuration for the cri-dockerd Binary Install

# Kubernetes 1.25: configure the kubelet service on every k8s node
cat > /usr/lib/systemd/system/kubelet.service << EOF

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
    --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig  \\
    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
    --config=/etc/kubernetes/kubelet-conf.yml \\
    --container-runtime-endpoint=unix:///run/cri-dockerd.sock  \\
    --node-labels=node.kubernetes.io/node=

[Install]
WantedBy=multi-user.target
EOF


# Kubernetes 1.24: configure the kubelet service on every k8s node
cat > /usr/lib/systemd/system/kubelet.service << EOF

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
    --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig  \\
    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
    --config=/etc/kubernetes/kubelet-conf.yml \\
    --container-runtime=remote  \\
    --runtime-request-timeout=15m  \\
    --container-runtime-endpoint=unix:///run/cri-dockerd.sock  \\
    --cgroup-driver=systemd \\
    --node-labels=node.kubernetes.io/node= \\
    --feature-gates=IPv6DualStack=true

[Install]
WantedBy=multi-user.target
EOF

# Reload systemd and restart kubelet
systemctl daemon-reload
systemctl restart kubelet
systemctl enable --now kubelet

3.4 Pre-Pull the Images (optional)

k8s images

kubeadm config images list --kubernetes-version v1.23.12
#List the images required by the specified version

k8s.gcr.io/kube-apiserver:v1.23.12
k8s.gcr.io/kube-controller-manager:v1.23.12
k8s.gcr.io/kube-scheduler:v1.23.12
k8s.gcr.io/kube-proxy:v1.23.12
k8s.gcr.io/pause:3.6
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
#Required images

kubeadm config images pull --cri-socket unix:///run/cri-dockerd.sock
#Method one: pull the images with kubeadm. By default this pulls from Google's registry, which may be unreachable; adding --image-repository registry.aliyuncs.com/google_containers switches to the Aliyun mirror.

docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.12
docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.12
docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.12
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.23.12
docker pull registry.aliyuncs.com/google_containers/pause:3.6
docker pull registry.aliyuncs.com/google_containers/etcd:3.5.1-0
docker pull registry.aliyuncs.com/google_containers/coredns:v1.8.6
#Method two: on the master, pre-pull all required images from the Aliyun mirror with docker pull. The tags must match the image list above for the target version.

3.5 Initialize the Master Node

kubeadm init \
--apiserver-advertise-address=192.168.168.128 \
--apiserver-bind-port 6443 \
--kubernetes-version v1.23.12 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--service-dns-domain=cluster.local \
--image-repository registry.aliyuncs.com/google_containers

#--apiserver-advertise-address= set to the master host's IP address.
#--pod-network-cidr=10.244.0.0/16 is the pod network segment, matching the default in kube-flannel.yml.

3.6 Initialization Complete

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.3.204:6443 --token hntmfb.2n2bulfht4bn6toi \
    --discovery-token-ca-cert-hash sha256:cbc255a021183b60c3759556658dad556091f12fa6074a698999fb5f05fd96d3 

3.7 Run the Commands from the Output

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#kubectl only works after these commands have been run.
#admin.conf is the credentials file; any node with kubectl and a copy of this file can run kubectl commands against the cluster.
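
A quick sanity check that kubectl can now reach the API server:

kubectl cluster-info
kubectl get nodes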

3.8 Join the Worker Nodes to the Cluster

kubeadm join 192.168.3.204:6443 --token hntmfb.2n2bulfht4bn6toi \
    --discovery-token-ca-cert-hash sha256:cbc255a021183b60c3759556658dad556091f12fa6074a698999fb5f05fd96d3 

3.9 Deploy the CNI Network Plugin

The official list of K8S network add-ons:

https://kubernetes.io/zh-cn/docs/concepts/cluster-administration/addons/

1. The flannel network plugin

#Run on the master node
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

#Pre-pulling the images flannel needs speeds up the network installation.
#The required images are listed in kube-flannel.yml; the versions may change.
docker pull docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
docker pull docker.io/rancher/mirrored-flannelcni-flannel:v0.20.0

#Apply the manifest
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

#List the running pods
kubectl get pods -n kube-system
#-n selects the kube-system namespace

#Check the cluster nodes on the master
kubectl get node

#Expected output:
NAME            STATUS   ROLES                  AGE     VERSION
k8s-master-01   Ready    control-plane,master   2m15s   v1.23.12
k8s-node-01     Ready    <none>                 117s    v1.23.12
k8s-node-02     Ready    <none>                 114s    v1.23.12
#After a short while each node's STATUS changes to Ready.

kube-flannel.yml

---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
       #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
       #image: flannelcni/flannel:v0.20.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
       #image: flannelcni/flannel:v0.20.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate

IV. Kubernetes and Docker

Kubernetes is a container orchestration engine, while Docker is a container runtime (others include containerd and CRI-O).

Kubernetes interacts with container runtimes through the CRI (Container Runtime Interface).

Docker Engine does not implement the CRI, so Kubernetes cannot talk to Docker through the CRI directly; a middle layer is needed, and that layer is cri-dockerd. Kubernetes versions before v1.24 shipped a built-in component for this purpose, called dockershim.

From Kubernetes v1.24 onward, using Docker as the container runtime requires installing cri-dockerd.

cri-dockerd's GitHub page:

https://github.com/Mirantis/cri-dockerd

V. High-Availability Cluster

keepalived provides a highly available VIP, and haproxy acts as a reverse proxy for kube-apiserver: requests destined for kube-apiserver are sent to the VIP held by haproxy, which forwards them to the master nodes.
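
The keepalived side is not shown in this guide; the following is a minimal sketch of /etc/keepalived/keepalived.conf for the node that should normally hold the VIP 192.168.3.38, assuming the NIC is named eth0 (adjust to your environment; the standby node uses state BACKUP and a lower priority):

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-ha
    }
    virtual_ipaddress {
        192.168.3.38 dev eth0
    }
}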

5.1 haproxy Configuration

global
    maxconn 100000
    chroot /apps/haproxy
    stats socket /apps/haproxy/haproxy.sock mode 600 level admin
    uid 99
    gid 99
    daemon
    pidfile /apps/haproxy/haproxy.pid
    log 127.0.0.1 local3 info

defaults
    option redispatch
    option http-keep-alive
    maxconn 100000
    timeout http-keep-alive 120s
    timeout connect 600s
    timeout server 600s
    timeout client 600s
    timeout check 3s

listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri /haproxy-status
    stats auth admin:12345678

listen k8s-apiserver
    bind 192.168.3.38:6443
    mode tcp
    log global
    balance roundrobin
    server k8s-m1 192.168.3.31:6443 check inter 3s fall 3 rise 5 weight 1
#    server k8s-m2 192.168.3.32:6443 check inter 3s fall 3 rise 5 weight 1
#    server k8s-m3 192.168.3.33:6443 check inter 3s fall 3 rise 5 weight 1
#    Backend servers: enable only one master at first; uncomment the others after they have joined the cluster.

5.2 Initialize the First Master

kubeadm init \
--apiserver-advertise-address=192.168.3.31 \
--control-plane-endpoint=192.168.3.38 \
--apiserver-bind-port 6443 \
--kubernetes-version v1.23.12 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--service-dns-domain=cluster.local \
--image-repository registry.aliyuncs.com/google_containers
#--control-plane-endpoint=192.168.3.38 points at the VIP; a domain name also works.

5.3 Initialize the Master from a Config File (optional)

kubeadm-init.yaml:

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.3.31
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  name: node
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.3.38:6443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.23.12
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}

kubeadm init --config kubeadm-init.yaml
#Initialize the k8s master from the config file
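
A starting template can be generated by kubeadm itself and then edited:

kubeadm config print init-defaults > kubeadm-init.yaml
#Print the default InitConfiguration/ClusterConfiguration as a template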

5.4 Install the flannel Network

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

5.5 Add the Remaining Masters to the Cluster

Run on the existing master:

kubeadm init phase upload-certs --upload-certs
#Generate the certificate key

[upload-certs] Using certificate key:
7f6178f6a551fcb82002adfa0ae121b205b189603f22771115a61b9da46bd180
#A key has been generated

Run on each additional master:

#After installing kubelet, kubeadm, and kubectl on the other masters, do not run kubeadm init on them.
#Instead, run kubeadm join so that they join the existing cluster in the control-plane role.

  kubeadm join 192.168.3.38:6443 --token jhchd8.ce3fitje6gtngv46 \
    --discovery-token-ca-cert-hash sha256:d00bbf53e5190dca9ca19ecb086cc2368e37e47251fec35c21449c7973dbd13a \
    --control-plane \
    --certificate-key 7f6178f6a551fcb82002adfa0ae121b205b189603f22771115a61b9da46bd180
#Append --certificate-key to the master join command, using the key generated in the previous step.

5.6 Add the Worker Nodes to the Cluster

  kubeadm join 192.168.3.38:6443 --token 7od4bf.bakobwmtwyqqcfsi \
    --discovery-token-ca-cert-hash sha256:08fa52e8b3daa94f6efb87eef37b5b3e2bd2ae655aee734ec8411d47e2a8617b 

5.7 Test Cluster Network Communication

kubectl run net-test1 --image=alpine sleep 360000
kubectl run net-test2 --image=alpine sleep 360000
#Create two test pods.
#The test pods should run on different nodes so that cross-node networking can be tested.

kubectl get pod -o wide
#Check pod status and note each pod's IP address.

kubectl exec -it net-test1 sh
#Open a shell in net-test1.

ping <net-test2-pod-ip>
#Ping the other pod's IP address to test communication across nodes.
#Then ping an external address to test outbound connectivity.

5.8 Remove a Node

Drain the node, marking it unschedulable and evicting its pods:

kubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data

Remove the node:

kubectl delete node <node-name>

Check the status of the remaining nodes:

kubectl get nodes

On the removed node, clean up the old Kubernetes configuration:

sudo rm -rf /etc/kubernetes /var/lib/kubelet /var/lib/kubernetes

VI. Deploying the Dashboard

GitHub page:
https://github.com/kubernetes/dashboard
Different dashboard releases are compatible with different k8s versions; check the Compatibility table on that page.

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.1/aio/deploy/recommended.yaml
#Apply the dashboard yaml manifest directly from its URL

Two images are required; they can be pulled in advance:

docker pull kubernetesui/dashboard:v2.5.1
docker pull kubernetesui/metrics-scraper:v1.0.7

#Optionally push them to a private registry and point the image fields in the yaml at it.

Modify the yaml configuration:

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30002
  selector:
    k8s-app: kubernetes-dashboard

---
#Add type: NodePort
#Add nodePort: 30002

Add a kubernetes-dashboard admin user:

#Create the yaml file
vim admin-user.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
  
#Create the user and bind it to the cluster-admin role
kubectl apply -f admin-user.yaml
#Apply the yaml

Get the user's token from the command line:

kubectl get secret -A |grep admin-user
kubectl describe secret admin-user-token-xxxx -n kubernetes-dashboard
#Copy the full token; it is used to log in.


kubectl describe secret -n kubernetes-dashboard $(kubectl get secret -n kubernetes-dashboard | grep admin-user | awk '{print $1}')
#One-liner that fetches the same token
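
Note that on Kubernetes v1.24 and later, a ServiceAccount no longer gets a long-lived token Secret created automatically; on such clusters, request a short-lived token explicitly instead:

kubectl -n kubernetes-dashboard create token admin-user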

Log in to the dashboard:

#Browse to any node's IP on the port defined by nodePort: 30002.

VII. Cluster Maintenance

7.1 Reset the K8S Environment

kubeadm reset
#Reset this node.
#This undoes the changes made by kubeadm init and kubeadm join.

rm -fr $HOME/.kube
#Then remove the kubectl config directory manually (if present)

reboot
#Reboot the server

7.2 Generate a New Token

The default token is valid for 24 hours. Once it has expired, create a new one on the master:

kubeadm token create --print-join-command

7.3 Renew the Certificates

7.3.1 Check Certificate Expiry

kubeadm certs check-expiration
#Show certificate expiration dates

7.3.2 Renew All Certificates

kubeadm certs renew all
#With multiple masters, run this on every master node.

After renewal, kube-apiserver, kube-controller-manager, kube-scheduler, and etcd must be restarted. These run as static pods, and deleting their mirror pods with kubectl does not restart the underlying containers; instead, briefly move the manifests out of /etc/kubernetes/manifests (kubelet stops the pods) and move them back (kubelet recreates them):

# Restart the control-plane static pods
mkdir -p /tmp/manifests
mv /etc/kubernetes/manifests/* /tmp/manifests/
sleep 20
mv /tmp/manifests/* /etc/kubernetes/manifests/

VIII. Upgrading the k8s Version

8.1 Check the Current Versions

kubeadm version
#Check the current kubeadm version

kubectl get node
#Check the node versions

8.2 Upgrade the Master Nodes

apt-cache madison kubeadm
#List the versions available in the repository

apt install kubeadm=1.23.13-00
#Install the target version

kubeadm version
#Confirm the new kubeadm version

kubeadm upgrade plan
#Show the upgrade plan

The upgrade plan:

[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks.
[upgrade] Running cluster health checks
[upgrade] Fetching available versions to upgrade to
[upgrade/versions] Cluster version: v1.23.12
[upgrade/versions] kubeadm version: v1.23.13
I1031 09:51:56.636977  346877 version.go:255] remote version is much newer: v1.25.3; falling back to: stable-1.23
[upgrade/versions] Target version: v1.23.13
[upgrade/versions] Latest version in the v1.23 series: v1.23.13

Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT   CURRENT        TARGET
kubelet     5 x v1.23.12   v1.23.13

Upgrade to the latest version in the v1.23 series:

COMPONENT                 CURRENT    TARGET
kube-apiserver            v1.23.12   v1.23.13
kube-controller-manager   v1.23.12   v1.23.13
kube-scheduler            v1.23.12   v1.23.13
kube-proxy                v1.23.12   v1.23.13
CoreDNS                   v1.8.6     v1.8.6
etcd                      3.5.1-0    3.5.1-0

You can now apply the upgrade by executing the following command:

    kubeadm upgrade apply v1.23.13

_____________________________________________________________________


The table below shows the current state of component configs as understood by this version of kubeadm.
Configs that have a "yes" mark in the "MANUAL UPGRADE REQUIRED" column require manual config upgrade or
resetting to kubeadm defaults before a successful upgrade can be performed. The version to manually
upgrade to is denoted in the "PREFERRED VERSION" column.

API GROUP                 CURRENT VERSION   PREFERRED VERSION   MANUAL UPGRADE REQUIRED
kubeproxy.config.k8s.io   v1alpha1          v1alpha1            no
kubelet.config.k8s.io     v1beta1           v1beta1             no
_____________________________________________________________________

Pre-pull the required images (optional):

docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.13
docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.13
docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.13
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.23.13
#The components that need upgrading are listed in the upgrade plan output
#Some component versions stay the same.

kubeadm upgrade apply v1.23.13
#Run the upgrade
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W1031 10:00:31.471358  349912 utils.go:69] The recommended value for "resolvConf" in "KubeletConfiguration" is: /run/systemd/resolve/resolv.conf; the provided value is: /run/systemd/resolve/resolv.conf
[preflight] Running pre-flight checks.
[upgrade] Running cluster health checks
[upgrade/version] You have chosen to change the cluster version to "v1.23.13"
[upgrade/versions] Cluster version: v1.23.12
[upgrade/versions] kubeadm version: v1.23.13
[upgrade/confirm] Are you sure you want to proceed with the upgrade? [y/N]: 
#Enter y to confirm the upgrade
[upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.23.13". Enjoy!

[upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so.
#Upgrade succeeded

kubectl get node
NAME            STATUS   ROLES                  AGE   VERSION
k8s-master-01   Ready    control-plane,master   17h   v1.23.12
k8s-master-02   Ready    control-plane,master   17h   v1.23.12
k8s-master-03   Ready    control-plane,master   17h   v1.23.12
k8s-node-01     Ready    <none>                 16h   v1.23.12
k8s-node-02     Ready    <none>                 16h   v1.23.12
#The VERSION column still shows the old release

apt install kubectl=1.23.13-00 kubelet=1.23.13-00 kubeadm=1.23.13-00
#Upgrade kubelet, kubectl, and kubeadm on the masters

kubectl get node
NAME            STATUS   ROLES                  AGE   VERSION
k8s-master-01   Ready    control-plane,master   17h   v1.23.13
k8s-master-02   Ready    control-plane,master   17h   v1.23.13
k8s-master-03   Ready    control-plane,master   17h   v1.23.13
k8s-node-01     Ready    <none>                 17h   v1.23.12
k8s-node-02     Ready    <none>                 17h   v1.23.12
#Checking again, the master versions now show the new release.

8.3 Upgrade the Worker Nodes

apt install kubectl=1.23.13-00 kubelet=1.23.13-00 kubeadm=1.23.13-00
#Install the new kubelet, kubectl, and kubeadm on all remaining nodes (see the drain/uncordon sketch below)
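
For a safer rollout, the kubeadm documentation recommends draining each worker before upgrading it; a sketch for one node:

#On the master: drain the node
kubectl drain k8s-node-01 --ignore-daemonsets --delete-emptydir-data

#On the node: upgrade the kubelet configuration, then restart kubelet
kubeadm upgrade node
systemctl daemon-reload && systemctl restart kubelet

#On the master: return the node to service
kubectl uncordon k8s-node-01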
kubectl get node
NAME            STATUS   ROLES                  AGE   VERSION
k8s-master-01   Ready    control-plane,master   17h   v1.23.13
k8s-master-02   Ready    control-plane,master   17h   v1.23.13
k8s-master-03   Ready    control-plane,master   17h   v1.23.13
k8s-node-01     Ready    <none>                 17h   v1.23.13
k8s-node-02     Ready    <none>                 17h   v1.23.13
#Checking again, all nodes now report the upgraded version.

(Upgrade complete)
