# Kubernetes
# Installing Kubernetes with kubeadm
Installation requirements:
- One or more servers running CentOS 7.6
- All machines in the cluster must be able to reach each other; the firewall must be disabled
- Outbound internet access is required to pull images
- Swap must be disabled
- Components installed by kubeadm: apiserver, etcd, controller-manager, scheduler, kubelet (managed as a systemd daemon; all other components run as containers), and kube-proxy
Role | IP |
---|---|
k8s-master | 10.10.10.128 |
k8s-node1 | 10.10.10.129 |
k8s-node2 | 10.10.10.130 |
# Setting up Kubernetes (environment preparation)
All nodes must complete the following preparation steps so that the installation goes smoothly.
# Set the hostnames (run the matching command on each node):
[root@localhost ~]# hostnamectl set-hostname k8s-master
[root@localhost ~]# hostnamectl set-hostname k8s-node1
[root@localhost ~]# hostnamectl set-hostname k8s-node2
# Add host mappings:
$ vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.10.128 k8s-master
10.10.10.129 k8s-node1
10.10.10.130 k8s-node2
# Disable the firewall and SELinux:
$ systemctl stop firewalld
$ systemctl disable firewalld
$ sed -i 's/enforcing/disabled/' /etc/selinux/config
$ setenforce 0   # disable temporarily (takes effect immediately)
# Disable swap:
$ swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
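Optionally confirm swap is off; the Swap line of free should show all zeros:
$ free -m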
# Configure packet forwarding:
Pass bridged IPv4 traffic to the iptables chains:
$ cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
$ sysctl --system
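If sysctl complains that the net.bridge.* keys do not exist, the br_netfilter kernel module is likely not loaded yet; load it and make it persistent before re-running sysctl:
$ modprobe br_netfilter
$ echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
$ sysctl --system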
# Configure time synchronization:
$ yum install -y ntpdate
$ ntpdate time.windows.com
# Install Docker/kubeadm/kubelet (all nodes)
This setup uses Docker as the container runtime (CRI), so install Docker first.
# Install Docker:
$ yum install -y yum-utils device-mapper-persistent-data lvm2
$ yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
$ yum makecache fast
$ yum -y install docker-ce
# Configure the registry mirror and cgroup driver:
$ cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://ably8t50.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
$ systemctl daemon-reload
$ systemctl restart docker
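After the restart it is worth confirming that Docker actually picked up the systemd cgroup driver, since it must match the kubelet's:
$ docker info | grep -i cgroup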
# Configure the Alibaba Cloud Kubernetes YUM repository:
$ cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
$ yum install -y kubelet-1.20.0 kubeadm-1.20.0 kubectl-1.20.0
$ systemctl enable kubelet && systemctl start kubelet
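The kubelet will keep restarting until kubeadm init (or kubeadm join) has generated its configuration; that is expected at this point. The installed versions can still be confirmed:
$ kubeadm version -o short
$ kubelet --version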
# Deploy the master with kubeadm
# Initialize the cluster with kubeadm (run on the master node only)
$ kubeadm init \
--apiserver-advertise-address=10.10.10.128 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.20.0 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--ignore-preflight-errors=all
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
- --apiserver-advertise-address: the address the cluster advertises (the master's IP)
- --image-repository: the default registry k8s.gcr.io is unreachable from mainland China, so the Alibaba Cloud mirror is used instead
- --kubernetes-version: the Kubernetes version
- --service-cidr: the cluster's internal virtual (Service) network, the unified access entry point for Pods
- --pod-network-cidr: the Pod network; it must match the YAML of the CNI network component deployed below
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady control-plane,master 20s v1.20.0
# Deploy the CNI container network:
Calico is a pure layer-3 data center networking solution that supports a wide range of platforms, including Kubernetes and OpenStack.
On every compute node, Calico uses the Linux kernel to implement an efficient virtual router (vRouter) for data forwarding, and each vRouter propagates the routes of the workloads running on it to the rest of the Calico network via BGP.
Calico also implements Kubernetes network policy, providing ACL functionality.
Deploy the Pod network plugin.
Reason: coredns stays stuck in Pending until a network plugin is installed; the plugin chosen here is Calico.
[root@k8s-node2 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-7f89b7bc75-b7nml 0/1 Pending 0 16m
coredns-7f89b7bc75-mrn6x 0/1 Pending 0 16m
etcd-k8s-node2 1/1 Running 0 16m
kube-apiserver-k8s-node2 1/1 Running 0 16m
kube-controller-manager-k8s-node2 1/1 Running 0 16m
kube-proxy-78zrr 1/1 Running 0 16m
kube-scheduler-k8s-node2 1/1 Running 0 16m
$ wget https://docs.projectcalico.org/manifests/calico.yaml
If --pod-network-cidr was set to 10.244.0.0/16, uncomment CALICO_IPV4POOL_CIDR in calico.yaml and set it to the same value before applying.
$ kubectl apply -f calico.yaml
[root@k8s-node2 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-5f6cfd688c-g9mlv 0/1 Pending 0 7s
calico-node-4vbtr 0/1 Init:0/3 0 7s
coredns-7f89b7bc75-b7nml 0/1 Pending 0 12h
coredns-7f89b7bc75-mrn6x 0/1 Pending 0 12h
etcd-k8s-node2 1/1 Running 0 12h
kube-apiserver-k8s-node2 1/1 Running 0 12h
kube-controller-manager-k8s-node2 1/1 Running 0 12h
kube-proxy-78zrr 1/1 Running 0 12h
kube-scheduler-k8s-node2 1/1 Running 0 12h
[root@k8s-node2 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-5f6cfd688c-g9mlv 1/1 Running 0 2m7s
calico-node-4vbtr 1/1 Running 0 2m7s
coredns-7f89b7bc75-b7nml 1/1 Running 0 12h
coredns-7f89b7bc75-mrn6x 1/1 Running 0 12h
etcd-k8s-node2 1/1 Running 0 12h
kube-apiserver-k8s-node2 1/1 Running 0 12h
kube-controller-manager-k8s-node2 1/1 Running 0 12h
kube-proxy-78zrr 1/1 Running 0 12h
kube-scheduler-k8s-node2 1/1 Running 0 12h
# Deploy the Dashboard:
Download the Dashboard manifest:
$ wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
# Configure NodePort access:
Edit recommended.yaml to change how the Service is exposed outside the cluster: set the Service type to NodePort and the external port (nodePort) to 30001.
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort
[root@k8s-node2 ~]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard unchanged
serviceaccount/kubernetes-dashboard unchanged
service/kubernetes-dashboard unchanged
secret/kubernetes-dashboard-certs unchanged
secret/kubernetes-dashboard-csrf unchanged
secret/kubernetes-dashboard-key-holder unchanged
configmap/kubernetes-dashboard-settings unchanged
role.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
deployment.apps/kubernetes-dashboard unchanged
service/dashboard-metrics-scraper unchanged
deployment.apps/dashboard-metrics-scraper unchanged
# Bind the default cluster-admin role:
# Create the service account
$ kubectl create serviceaccount dashboard-admin -n kube-system
serviceaccount/dashboard-admin created
# Grant permissions to the service account
$ kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
# Retrieve the service account's token
$ kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
Name: dashboard-admin-token-52gsz
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: dashboard-admin
kubernetes.io/service-account.uid: 18c45018-7eaa-4381-a0c8-5266cc46b8c9
Type: kubernetes.io/service-account-token
Data
====
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IkREZGU2aExFYU83aDdxVm8zM09rdGNVVTVydkVIUmxIdDhMejZBY2F0NkUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tNTJnc3oiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMThjNDUwMTgtN2VhYS00MzgxLWEwYzgtNTI2NmNjNDZiOGM5Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.JAb0BvGP268aBDAPx3LaipuK5s9e4-RgYW3y35t29xY-4DUaUKrkbOM2h4unU1QPrXPMS3M4uODvQi3jv5ckg-KAzCebCGCzHrPCKOuZT_HwZidvdBf5QD3oWYAzaF8CoV5Yvwu2D2e0bl4S5cC6TOmW5BwO0CgPZor_iMXzkI0UUR8AADFbHa0V4xMHMF2MWihclkGV5UsfK8mt9uHpRxcQD4RSy_uFY1SYkGklNeMHrysoFBNAv8lTGq-8LuZnsiRrjjwE9YucMgVe2bSGqlg6gtnz_arCwW7fQwx1UkvAl_qdp452GAD3EGkPVqzvuH10R-Fpk84hgJJpk_AMrg
ca.crt: 1066 bytes
# Log in to the cluster at https://10.10.10.128:30001 using the token above
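If you are unsure which NodePort the Dashboard was exposed on, read it back from the Service:
$ kubectl get svc -n kubernetes-dashboard kubernetes-dashboard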
# Join the worker nodes with kubeadm (run on each node):
$ kubeadm join 10.10.10.130:6443 --token 9rkpbe.648h6r6rkp24f4xg \
--discovery-token-ca-cert-hash sha256:add1c33a33d53a261fb58516d207a9f13bc41ec5085960e36e4431bd70479e65
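The bootstrap token printed by kubeadm init is only valid for 24 hours. If it has expired, generate a fresh join command on the master, then confirm the new nodes register:
$ kubeadm token create --print-join-command
$ kubectl get nodes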
# Switch to containerd:
If the environment currently uses Docker, switch the container engine to containerd.
The runsc and containerd-shim-runsc-v1 binaries still need to be placed under /usr/local/bin.
1. Prepare the configuration
Enable IPv4 forwarding and reload the system configuration:
[root@master01 ~]# cat > /etc/sysctl.d/99-kubernetes-cri.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
[root@master01 ~]# sysctl --system
2. Install containerd
If docker-ce is already installed, containerd.io was pulled in as a dependency; otherwise install containerd explicitly.
On Ubuntu, add the docker-ce repo and then install it with apt-get.
[root@master01 ~]# cd /etc/yum.repos.d
[root@master01 ~]# wget http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@master01 ~]# yum install -y containerd.io
3. Edit the configuration file
- pause image address
- change the cgroup driver to systemd
- add the runsc container runtime
- configure a Docker registry mirror
The default containerd configuration file is empty, so regenerate a complete one.
containerd's configuration directory is /etc/containerd.
[root@master01 ~]# cd /etc/containerd/
[root@master01 containerd]# containerd config default > config.toml
[root@master01 containerd]# vim config.toml
43 [plugins."io.containerd.grpc.v1.cri"]
...
# change the pause image to a domestic mirror
56 sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.2"
...
# add a runsc runtime (at the same level as runc) with its runtime type
90 [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc]
91 runtime_type = "io.containerd.runsc.v1"
# switch the runc options to the systemd cgroup driver
94 [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
103 [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
...
114 SystemdCgroup = true
# add a Docker registry mirror (Alibaba Cloud accelerator)
139 [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
140 [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
141 endpoint = ["https://b9pmyelo.mirror.aliyuncs.com"]
Configure the kubelet to use containerd
After the configuration is done, restart containerd first, then the kubelet; finally, if Pods report errors, restart Docker.
[root@master01 ~]# vi /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --cgroup-driver=systemd
[root@master01 ~]# systemctl restart containerd
[root@master01 ~]# systemctl restart kubelet
[root@master01 ~]# systemctl restart docker
Verify the container runtime currently used by Kubernetes.
The steps above must be performed on every node in the cluster, including the crictl-to-containerd configuration shown below.
[root@master01 ~]# kubectl get node -o wide | awk '{print $1,$2,$5,$NF}' | column -t
NAME STATUS VERSION CONTAINER-RUNTIME
master01 Ready v1.22.1 containerd://1.5.11
master02 Ready v1.22.1 containerd://1.5.11
master03 Ready v1.22.1 containerd://1.5.11
Connect crictl to containerd
containerd ships with the ctr management tool, but it is fairly limited; crictl is usually used to inspect and debug containers.
Project: https://github.com/kubernetes-sigs/cri-tools/
Prepare the crictl configuration file pointing at containerd:
[root@master01 ~]# cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF
[root@master01 ~]# systemctl restart containerd
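With the endpoint configured, crictl can be used much like the docker CLI to inspect the runtime, for example:
$ crictl ps        # running containers
$ crictl images    # local images
$ crictl pods      # pod sandboxes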
# Use NFS as the default StorageClass:
Install the NFS service, export the shared storage path with the appropriate permissions, and set up the NFS server and NFS clients.
On the master node, run vi /etc/exports to create the exports file, create the shared directory, and start the NFS service. On the other nodes, install the NFS utilities and start the client-side services.
$ yum install -y nfs-utils
$ echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
$ mkdir -p /nfs/data
$ systemctl enable rpcbind && systemctl start rpcbind
$ systemctl enable nfs-server && systemctl start nfs-server
# Check that the export is effective
$ exportfs
# Expected output:
/nfs/data     <world>
$ showmount -e 10.10.10.128
Export list for 10.10.10.128
/nfs/data *
- On the worker nodes (node1 and node2), configure the NFS client.
- Install the client utilities and mount the shared directory from the NFS server at the local path /root/nfsmount.
$ yum install -y nfs-utils
$ mkdir /root/nfsmount
$ systemctl enable rpcbind && systemctl start rpcbind
$ systemctl enable nfs-server && systemctl start nfs-server
$ mount -t nfs 10.10.10.128:/nfs/data /root/nfsmount
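A quick end-to-end check of the mount: a file created on the client should appear under /nfs/data on the server:
$ df -h /root/nfsmount
$ touch /root/nfsmount/test-from-client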
# Test a Pod mounting NFS directly
$ vim pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: vol-nfs
  namespace: default
spec:
  volumes:
    - name: html
      nfs:
        path: /nfs/data          # 1000G
        server: 10.10.10.128     # your NFS server address
  containers:
    - name: myapp
      image: nginx
      volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html/
$ kubectl apply -f pod.yaml
$ echo ABCD > /nfs/data/index.html
$ curl $(kubectl get pod vol-nfs -o jsonpath='{.status.podIP}')
ABCD
# Set up dynamic provisioning:
# Create the provisioner (the NFS environment was set up above)
Field | Value | Notes |
---|---|---|
Name | storage-nfs | custom StorageClass name |
NFS Server | 10.10.10.128 | IP address of the NFS service |
NFS Path | /nfs/data | path shared by the NFS service |
Run the following on the master node.
First create the RBAC authorization and the provisioner Deployment:
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME   # must match the provisioner field of the StorageClass below
              value: storage.pri/nfs
            - name: NFS_SERVER
              value: 10.10.10.128      # NFS server address
            - name: NFS_PATH
              value: /nfs/data         # NFS shared directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.10.10.128       # NFS server address
            path: /nfs/data            # NFS shared directory
## In this image the volume mountPath defaults to /persistentvolumes and must not be changed, otherwise the provisioner fails at runtime.
On the master, create the provisioner by applying the manifest above (a short verification sketch follows), then create the StorageClass, and finally change the cluster's default StorageClass.
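A minimal sketch of that first step, assuming the manifest above was saved as nfs-provisioner.yaml (the file name is just an example):
$ kubectl apply -f nfs-provisioner.yaml
$ kubectl get pods -n kube-system | grep nfs-client-provisioner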
# A PV's "reclaim policy" can be one of three values: Retain, Recycle, or Delete.
Retain
Keeps the PV and the data on it after the PVC releases it; the PV's status changes to "Released" and it will not be bound by another PVC. The cluster administrator reclaims the storage manually:
- Delete the PV manually; the backing storage volume (such as an AWS EBS, GCE PD, Azure Disk, or Cinder volume) still exists.
- Manually wipe the data on the backing volume.
- Manually delete the backing volume, or reuse it and create a new PV for it.
Delete
Deletes both the released PV and the backing storage volume. For dynamically provisioned PVs the "reclaim policy" is inherited from the StorageClass and defaults to Delete. The cluster administrator should set the StorageClass's "reclaim policy" to the desired value; otherwise users have to edit the reclaim policy of each dynamically created PV by hand.
Recycle
- Keeps the PV but wipes the data on it; this policy is deprecated.
$ vi storageclass-nfs.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: storage-nfs
provisioner: storage.pri/nfs
reclaimPolicy: Delete
$ kubectl apply -f storageclass-nfs.yaml
# Change the cluster's default StorageClass
https://kubernetes.io/zh/docs/tasks/administer-cluster/change-default-storage-class/#%e4%b8%ba%e4%bb%80%e4%b9%88%e8%a6%81%e6%94%b9%e5%8f%98%e9%bb%98%e8%ae%a4-storage-class
$ kubectl patch storageclass storage-nfs -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
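Verify the change; storage-nfs should now be listed with "(default)" after its name:
$ kubectl get storageclass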
# Verify NFS dynamic provisioning
# Create a PVC
$ vi pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-claim-01
  # annotations:
  #   volume.beta.kubernetes.io/storage-class: "storage-nfs"
spec:
  storageClassName: storage-nfs   # must exactly match the StorageClass name
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
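Apply the claim and check that it gets Bound to a dynamically created PV:
$ kubectl apply -f pvc.yaml
$ kubectl get pvc pvc-claim-01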
# Use the PVC
$ vi testpod.yaml
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod
      image: busybox
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: pvc-claim-01
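Apply the Pod and confirm the SUCCESS file appears in the directory the provisioner created for this claim under /nfs/data on the NFS server (the subdirectory name is generated by the provisioner):
$ kubectl apply -f testpod.yaml
$ ls /nfs/data/*/SUCCESS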
# Install metrics-server:
First install metrics-server (the YAML below already has the image and configuration adjusted and can be used directly), so that Pod and node resource usage can be monitored (by default only CPU and memory metrics are available; Prometheus is covered later for more detailed monitoring).
$ vim metrics.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
        # mount in tmp so we can safely use from-scratch images and/or read-only containers
        - name: tmp-dir
          emptyDir: {}
      containers:
        - name: metrics-server
          image: mirrorgooglecontainers/metrics-server-amd64:v0.3.6
          imagePullPolicy: IfNotPresent
          args:
            - --cert-dir=/tmp
            - --secure-port=4443
            - --kubelet-insecure-tls
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
          ports:
            - name: main-port
              containerPort: 4443
              protocol: TCP
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          volumeMounts:
            - name: tmp-dir
              mountPath: /tmp
      nodeSelector:
        kubernetes.io/os: linux
        kubernetes.io/arch: "amd64"
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: metrics-server
  ports:
    - port: 443
      protocol: TCP
      targetPort: main-port
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
      - configmaps
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
$ kubectl apply -f metrics.yaml
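Once the metrics-server Pod is Running, resource usage can be queried through the Metrics API:
$ kubectl top nodes
$ kubectl top pods -A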