Notes on Deploying a Kubernetes 1.13 Cluster Offline on CentOS 7

Date: 2019-06-12
This post records an offline deployment of a Kubernetes 1.13 cluster on CentOS 7: the commands, configuration files, and points that need attention, kept for future reference.

I. Overview

This post mainly follows a deployment guide from the Kubernetes Chinese community, CentOS 使用二进制部署 Kubernetes 1.13集群 (deploying a Kubernetes 1.13 cluster from binaries on CentOS), and records the process in more detail for future use.

II. Deployment environment

1. Kubernetes version: 1.13; download links for the binaries are given in the referenced article.

2. Local deployment environment

ip hostname kernel-version
10.0.3.107 manager107 3.10.0-957.1.3.el7.x86_64
10.0.3.68 worker68 3.10.0-957.1.3.el7.x86_64
10.0.3.80 worker80 3.10.0-957.1.3.el7.x86_64

3. Network layout

See the referenced article, CentOS 使用二进制部署 Kubernetes 1.13集群.

III. Kubernetes installation and configuration

1. Create temporary directories

#For etcd certificates and config files
[root@manager107 ~]# mkdir -p /home/workspace/etcd
#For k8s certificates and config files
[root@manager107 ~]# mkdir -p /home/workspace/k8s
#For k8s installation packages
[root@manager107 ~]# mkdir -p /home/workspace/packages

2. Disable the firewall, swap, and SELinux

Run on all three servers:

systemctl stop firewalld && systemctl disable firewalld
setenforce 0
swapoff -a && sysctl -w vm.swappiness=0
#To make these persistent across reboots: comment out the swap entry in /etc/fstab, and set SELINUX=disabled in the SELinux config
vi /etc/selinux/config
SELINUX=disabled
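A quick verification (not in the original article) that all three settings took effect:

free -m                        #the Swap line should show 0 total
getenforce                     #prints Permissive now, Disabled after a reboot
systemctl is-active firewalld  #prints inactive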

3. Install Docker
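The original article leaves this step to the reader. For an offline environment, one common approach is to download the Docker CE RPMs and their dependencies on an internet-connected machine, copy them over, and install locally. A minimal sketch, assuming the RPMs were staged under /home/workspace/packages/docker (a hypothetical path):

[root@manager107 ~]# cd /home/workspace/packages/docker
#Install every pre-downloaded RPM without touching the network
[root@manager107 docker]# yum localinstall -y *.rpm
[root@manager107 docker]# systemctl enable docker && systemctl start docker
#Repeat on worker68 and worker80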

4. Create installation directories

[root@manager107 ~]# mkdir /k8s/etcd/{bin,cfg,ssl} -p
[root@manager107 ~]# mkdir /k8s/kubernetes/{bin,cfg,ssl} -p

5. Install and configure CFSSL

[root@manager107 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@manager107 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@manager107 ~]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@manager107 ~]# chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
[root@manager107 ~]# mv cfssl_linux-amd64 /usr/local/bin/cfssl
[root@manager107 ~]# mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@manager107 ~]# mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
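(For a truly offline install, these three files would be downloaded in advance and copied over instead of fetched with wget.) A quick sanity check that the tools are on the PATH:

[root@manager107 ~]# cfssl version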

6. Create certificates

[root@manager107 ~]# cd /home/workspace/etcd
#Create the etcd CA signing config (profiles and certificate lifetimes)
[root@manager107 etcd]# cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF
#Create the etcd CA certificate signing request (CSR)
[root@manager107 etcd]# cat << EOF | tee ca-csr.json
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Shenzhen",
            "ST": "Shenzhen"
        }
    ]
}
EOF
#Create the etcd server CSR; the hosts list must include every etcd node IP
[root@manager107 etcd]# cat << EOF | tee server-csr.json
{
    "CN": "etcd",
    "hosts": [
    "10.0.3.107",
    "10.0.3.68",
    "10.0.3.80"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Shenzhen",
            "ST": "Shenzhen"
        }
    ]
}
EOF
#Generate the etcd CA certificate and private key, then sign the server certificate
[root@manager107 etcd]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
[root@manager107 etcd]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
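cfssljson -bare writes the key pairs into the current directory; after both gencert runs, a listing should show (per cfssljson's naming convention):

[root@manager107 etcd]# ls *.pem
ca-key.pem  ca.pem  server-key.pem  server.pem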
[root@manager107 etcd]# cd /home/workspace/k8s/
#Create the Kubernetes CA signing config
[root@manager107 k8s]# cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF
[root@manager107 k8s]# cat << EOF | tee ca-csr.json
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Shenzhen",
            "ST": "Shenzhen",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
[root@manager107 k8s]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#Generate the API server certificate (the hosts list must include the master IP and the in-cluster kubernetes service names)
[root@manager107 k8s]# cat << EOF | tee server-csr.json
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "10.0.3.107",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Shenzhen",
            "ST": "Shenzhen",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
[root@manager107 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
#Create the kube-proxy client certificate
[root@manager107 k8s]# cat << EOF | tee kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shenzhen",
      "ST": "Shenzhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
[root@manager107 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
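As with the etcd certificates, the three gencert runs above should leave six PEM files in the working directory:

[root@manager107 k8s]# ls *.pem
ca-key.pem  ca.pem  kube-proxy-key.pem  kube-proxy.pem  server-key.pem  server.pem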

7. SSH key authentication

[root@manager107 ~]# ssh-keygen
[root@manager107 ~]# ssh-copy-id 10.0.3.68
[root@manager107 ~]# ssh-copy-id 10.0.3.80
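A quick check (not in the original) that passwordless login works before the scp steps that follow:

[root@manager107 ~]# ssh 10.0.3.68 hostname
worker68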

8. Deploy etcd

[root@manager107 workspace]# cd /home/workspace/packages/k8s1.13-centos
[root@manager107 k8s1.13-centos]# tar -xvf etcd-v3.3.10-linux-amd64.tar.gz
[root@manager107 k8s1.13-centos]# cd etcd-v3.3.10-linux-amd64/
[root@manager107 etcd-v3.3.10-linux-amd64]# cp etcd etcdctl /k8s/etcd/bin/
[root@manager107 etcd-v3.3.10-linux-amd64]# vim /k8s/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.3.107:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.3.107:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.3.107:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.3.107:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://10.0.3.107:2380,etcd02=https://10.0.3.68:2380,etcd03=https://10.0.3.80:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
#Create the etcd systemd unit file
[root@manager107 etcd-v3.3.10-linux-amd64]# vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/k8s/etcd/cfg/etcd
ExecStart=/k8s/etcd/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/k8s/etcd/ssl/server.pem \
--key-file=/k8s/etcd/ssl/server-key.pem \
--peer-cert-file=/k8s/etcd/ssl/server.pem \
--peer-key-file=/k8s/etcd/ssl/server-key.pem \
--trusted-ca-file=/k8s/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/k8s/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
#Copy the certificate files
[root@manager107 etcd-v3.3.10-linux-amd64]# cd /home/workspace/etcd/
[root@manager107 etcd]# cp ca*pem server*pem /k8s/etcd/ssl
#Copy the binaries, certs, and config to nodes 68 and 80 (make sure /k8s exists there first)
[root@manager107 etcd]# cd /k8s/
[root@manager107 k8s]# ssh 10.0.3.68 "mkdir -p /k8s"
[root@manager107 k8s]# ssh 10.0.3.80 "mkdir -p /k8s"
[root@manager107 k8s]# scp -r etcd 10.0.3.68:/k8s/
[root@manager107 k8s]# scp -r etcd 10.0.3.80:/k8s/
[root@manager107 k8s]# scp /usr/lib/systemd/system/etcd.service 10.0.3.68:/usr/lib/systemd/system/etcd.service
[root@manager107 k8s]# scp /usr/lib/systemd/system/etcd.service 10.0.3.80:/usr/lib/systemd/system/etcd.service
#Edit the etcd config file on node 68
[root@worker68 ~]# vim /k8s/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.3.68:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.3.68:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.3.68:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.3.68:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://10.0.3.107:2380,etcd02=https://10.0.3.68:2380,etcd03=https://10.0.3.80:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
#Edit the etcd config file on node 80
[root@worker80 ~]# vim /k8s/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.3.80:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.3.80:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.3.80:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.3.80:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://10.0.3.107:2380,etcd02=https://10.0.3.68:2380,etcd03=https://10.0.3.80:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
#Start etcd on node 80
[root@worker80 ~]# systemctl daemon-reload
[root@worker80 ~]# systemctl enable etcd
[root@worker80 ~]# systemctl start etcd
#Start etcd on node 68
[root@worker68 ~]# systemctl daemon-reload
[root@worker68 ~]# systemctl enable etcd
[root@worker68 ~]# systemctl start etcd
#Start etcd on node 107
[root@manager107 ~]# systemctl daemon-reload
[root@manager107 ~]# systemctl enable etcd
[root@manager107 ~]# systemctl start etcd
#Verify that the cluster is running properly
[root@manager107 ~]# /k8s/etcd/bin/etcdctl --ca-file=/k8s/etcd/ssl/ca.pem \
--cert-file=/k8s/etcd/ssl/server.pem \
--key-file=/k8s/etcd/ssl/server-key.pem \
--endpoints="https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379" \
cluster-health
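When all three members are up, etcdctl prints one "member ... is healthy" line per node and the output ends with "cluster is healthy". If a member fails to start, inspect its logs on that node:

[root@manager107 ~]# journalctl -u etcd --no-pager | tail -n 20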

9. Deploy the Flannel network

#Write the cluster Pod network range into etcd
[root@manager107 ssl]# cd /k8s/etcd/ssl/
[root@manager107 ssl]# /k8s/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem \
--key-file=server-key.pem \
--endpoints="https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379" \
set /coreos.com/network/config '{ "Network": "172.20.0.0/16", "Backend": {"Type": "vxlan"}}'
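To confirm the key was written (a quick check, not in the original article):

[root@manager107 ssl]# /k8s/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem \
--key-file=server-key.pem \
--endpoints="https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379" \
get /coreos.com/network/config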
#Unpack and install
[root@manager107 ssl]# cd /home/workspace/packages/k8s1.13-centos
[root@manager107 k8s1.13-centos]# tar -xvf flannel-v0.10.0-linux-amd64.tar.gz
[root@manager107 k8s1.13-centos]# mv flanneld mk-docker-opts.sh /k8s/kubernetes/bin/
#Configure Flannel
[root@manager107 k8s1.13-centos]# vim /k8s/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379 -etcd-cafile=/k8s/etcd/ssl/ca.pem -etcd-certfile=/k8s/etcd/ssl/server.pem -etcd-keyfile=/k8s/etcd/ssl/server-key.pem"
#Create the flanneld systemd unit file
[root@manager107 k8s1.13-centos]# vim /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/k8s/kubernetes/cfg/flanneld
ExecStart=/k8s/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/k8s/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target
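The ExecStartPost line is what ties Flannel to Docker: mk-docker-opts.sh converts the subnet that flanneld leased into a DOCKER_NETWORK_OPTIONS variable in /run/flannel/subnet.env, which the docker.service below sources. After flanneld starts, the file should contain something roughly like the following (the subnet value is illustrative and differs per node):

[root@manager107 ~]# cat /run/flannel/subnet.env
DOCKER_NETWORK_OPTIONS=" --bip=172.20.63.1/24 --ip-masq=false --mtu=1450"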
#Configure Docker to start on the flannel-assigned subnet
[root@manager107 ~]# vim /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/run/flannel/subnet.env
# ExecStart=/usr/bin/dockerd  -H tcp://0.0.0.0:2376 -H unix://
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this option.
TasksMax=infinity

# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes

# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target
#Make the same docker.service changes on worker68 and worker80; the unit file is identical on all three nodes (and is also pushed out by the scp commands below), so it is not repeated here
#Copy the flanneld config and systemd unit files to all nodes
[root@manager107 ~]# cd /k8s
[root@manager107 k8s]# scp -r kubernetes 10.0.3.68:/k8s/
[root@manager107 k8s]# scp -r kubernetes 10.0.3.80:/k8s/
[root@manager107 k8s]# scp /k8s/kubernetes/cfg/flanneld 10.0.3.68:/k8s/kubernetes/cfg/flanneld
[root@manager107 k8s]# scp /k8s/kubernetes/cfg/flanneld 10.0.3.80:/k8s/kubernetes/cfg/flanneld
[root@manager107 k8s]# scp /usr/lib/systemd/system/docker.service  10.0.3.68:/usr/lib/systemd/system/docker.service
[root@manager107 k8s]# scp /usr/lib/systemd/system/docker.service  10.0.3.80:/usr/lib/systemd/system/docker.service
[root@manager107 k8s]# scp /usr/lib/systemd/system/flanneld.service  10.0.3.68:/usr/lib/systemd/system/flanneld.service
[root@manager107 k8s]# scp /usr/lib/systemd/system/flanneld.service  10.0.3.80:/usr/lib/systemd/system/flanneld.service
#Start flannel on node 107
[root@manager107 ~]# systemctl daemon-reload
[root@manager107 ~]# systemctl enable flanneld
[root@manager107 ~]# systemctl start flanneld
[root@manager107 ~]# systemctl restart docker
#Start flannel on node 68
[root@worker68 ~]# systemctl daemon-reload
[root@worker68 ~]# systemctl enable flanneld
[root@worker68 ~]# systemctl start flanneld
[root@worker68 ~]# systemctl restart docker
#Start flannel on node 80
[root@worker80 ~]# systemctl daemon-reload
[root@worker80 ~]# systemctl enable flanneld
[root@worker80 ~]# systemctl start flanneld
[root@worker80 ~]# systemctl restart docker
#Check that the changes took effect
[root@manager107 ~]# ip addr
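If everything worked, each node has a flannel.1 interface, and docker0 has been moved into the same flannel /24. A more targeted check, plus a cross-node connectivity test (addresses are illustrative):

[root@manager107 ~]# ip addr show flannel.1 | grep inet
[root@manager107 ~]# ip addr show docker0 | grep inet
#Ping the docker0/flannel.1 address of another node, e.g. worker68's
[root@manager107 ~]# ping -c 3 172.20.85.1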

10. Deploy the master node

The Kubernetes master node runs the following components:

  • kube-apiserver
  • kube-scheduler
  • kube-controller-manager

kube-scheduler and kube-controller-manager can run in clustered mode: leader election selects one active working process, while the other instances block on standby.

#Unpack the binaries and copy them to the master node
[root@manager107 ~]# cd /home/workspace/packages/k8s1.13-centos
[root@manager107 k8s1.13-centos]# tar -xvf kubernetes-server-linux-amd64.tar.gz
[root@manager107 k8s1.13-centos]# cd kubernetes/server/bin/
[root@manager107 bin]# cp kube-scheduler kube-apiserver kube-controller-manager kubectl /k8s/kubernetes/bin/
#Copy the certificates
[root@manager107 bin]# cd /home/workspace/k8s/
[root@manager107 k8s]# cp *pem /k8s/kubernetes/ssl/
#Deploy the kube-apiserver component
##Create a TLS bootstrapping token
[root@manager107 k8s]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
e9ca0f3e1b66c9bef910b47171490c53
[root@manager107 k8s]# vim /k8s/kubernetes/cfg/token.csv
e9ca0f3e1b66c9bef910b47171490c53,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
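The static token file format is token,user,uid,"group1,group2,..."; via --token-auth-file below, kube-apiserver will authenticate requests bearing this token as the kubelet-bootstrap user.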
##Create the apiserver config file
[root@manager107 k8s]# vim /k8s/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379 \
--bind-address=10.0.3.107 \
--secure-port=6443 \
--advertise-address=10.0.3.107 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/k8s/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/k8s/kubernetes/ssl/server.pem  \
--tls-private-key-file=/k8s/kubernetes/ssl/server-key.pem \
--client-ca-file=/k8s/kubernetes/ssl/ca.pem \
--service-account-key-file=/k8s/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/k8s/etcd/ssl/ca.pem \
--etcd-certfile=/k8s/etcd/ssl/server.pem \
--etcd-keyfile=/k8s/etcd/ssl/server-key.pem"
##Create the kube-apiserver systemd unit file
[root@manager107 k8s]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/k8s/kubernetes/cfg/kube-apiserver
ExecStart=/k8s/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
#Start the service
[root@manager107 k8s]# systemctl daemon-reload
[root@manager107 k8s]# systemctl enable kube-apiserver
[root@manager107 k8s]# systemctl restart kube-apiserver
#Check that the apiserver is running
[root@manager107 k8s]# systemctl status kube-apiserver

Original article: https://www.cnblogs.com/dowi/p/11009611.html