Kubernetes 运维(上)
# Kubernetes 运维(上)
# 基础运维
# 【题目1】Pod
在master节点/root目录下编写yaml文件nginx.yaml,具体要求如下:
- Pod名称:
nginx-pod
; - 命名空间:
default
; - 容器名称:
mynginx
; - 镜像:
nginx
; - 拉取策略:
IfNotPresent
; - 容器端口:
80
。
完成后使用该yaml文件创建Pod,并提交master节点的用户名、密码和IP到答题框。
[root@master ~]# kubectl run nginx-pod --image nginx --port 80 --dry-run -o yaml > nginx.yaml
# Pod "nginx-pod" (default namespace): a single nginx container on port 80.
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
  labels:
    app: test
spec:
  containers:
  - name: mynginx
    image: nginx
    # Only pull the image when it is not already cached on the node.
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
# 【题目2】多容器Pod
在master节点/root目录下编写yaml文件mu-pod.yaml,具体要求如下:
- Pod名称:
mu-pod
; - 命名空间:
default
; - Pod包含两个容器:
- 容器1名称:
containers01
;镜像:nginx;容器端口:80
; - 容器2名称:
containers02
;镜像:tomcat;容器端口:8080
;
- 拉取策略:
IfNotPresent
。
完成后使用该yaml文件创建Pod,并提交master节点的用户名、密码和IP到答题框。
[root@master ~]# kubectl run mu-pod --image=nginx --port=80 --dry-run -o yaml > mu-pod.yaml
# Multi-container Pod "mu-pod": nginx (port 80) + tomcat (port 8080).
apiVersion: v1
kind: Pod
metadata:
  name: mu-pod
  namespace: default
  labels:
    app: test
spec:
  containers:
  # Container 1: nginx on port 80.
  - name: containers01
    image: nginx
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
  # Container 2: tomcat on port 8080.
  # Fixed: the original said "containers0", but the task requires "containers02".
  - name: containers02
    image: tomcat
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 8080
# 【题目3】Namespace
在master节点/root目录下编写yaml文件my-namespace.yaml,具体要求如下:
- Namespace名称:
test
。
完成后使用该yaml文件创建Namespace,并提交master节点的用户名、密码和IP到答题框。
[root@master ~]# kubectl create namespace test -o yaml --dry-run > my-namespace.yaml
[root@master ~]# vi my-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: test
labels:
project: test
[root@master ~]# kubectl create -f my-namespace.yaml
namespace/test created
[root@master ~]# kubectl get ns
NAME STATUS AGE
default Active 7h34m
kube-node-lease Active 7h34m
kube-public Active 7h34m
kube-system Active 7h34m
kubernetes-dashboard Active 7h33m
springcloud Active 85m
test Active 5s
# 【题目4】Deployment
在master节点/root目录下编写yaml文件nginx-deployment.yaml,具体要求如下:
- Deployment名称:
nginx-deployment
; - 命名空间:
default
; - Pod名称:
nginx-deployment
,副本数:2
; - 网络:
hostNetwork
; - 镜像:
nginx
; - 容器端口:
80
完成后使用该yaml文件创建Deployment,并提交master节点的用户名、密码和IP到答题框。
[root@master ~]# kubectl create deployment nginx-deployment --image=nginx -o yaml --dry-run > nginx-deployment.yaml
[root@master ~]# vi nginx-deployment.yaml
# Deployment "nginx-deployment": 2 replicas of nginx on host networking, port 80.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: default
  labels:
    app: nginx-deploy
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pod
  template:
    metadata:
      labels:
        app: nginx-pod
    spec:
      # Task requires hostNetwork: Pods share the node's network namespace,
      # so containerPort 80 is exposed directly on each node.
      hostNetwork: true
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        # Fixed: the original set
        #   command: ["/bin/bash","-ce","tail -f /dev/null"]
        # which replaces the image entrypoint, so nginx never starts and
        # nothing listens on port 80. The default entrypoint must run.
        ports:
        - containerPort: 80
[root@master ~]# kubectl create -f nginx-deployment.yaml
deployment.apps/nginx-deployment created
# 【题目5】Service
在master节点/root目录下编写yaml文件service-clusterip.yaml,具体要求如下:
- Service名称:
service-clusterip
; - 命名空间:
default
; - 集群内部访问端口:
80
;targetPort:81
; - Service类型:
ClusterIP
。
完成后使用该yaml文件创建Service,并提交master节点的用户名、密码和IP到答题框。
[root@master ~]# kubectl create service clusterip service-clusterip --tcp=80:81 -o yaml --dry-run > service-clusterip.yaml
[root@master ~]# vi service-clusterip.yaml
# Service "service-clusterip": cluster-internal port 80 forwarded to Pod port 81.
apiVersion: v1
kind: Service
metadata:
  labels:
    run: nginx
  name: service-clusterip
spec:
  ports:
  # port: what clients inside the cluster connect to;
  # targetPort: the port the backend Pods actually listen on.
  - port: 80
    protocol: TCP
    targetPort: 81
  # Traffic is routed to Pods labelled app=nginx.
  selector:
    app: nginx
  # ClusterIP: reachable only from inside the cluster.
  type: ClusterIP
[root@master ~]# kubectl create -f service-clusterip.yaml
service/service-clusterip created
# 【题目6】RBAC--Role
在master节点/root目录下编写yaml文件role.yaml,具体要求如下:
- Role名称:
pod-reader
; - 命名空间:
default
; - 对default命名空间内的Pod拥有
get、watch、list
的权限。
完成后使用该yaml文件创建Role,并提交master节点的用户名、密码和IP到答题框。
[root@master ~]# kubectl create role pod-reader --verb=get,watch,list --resource=pod --dry-run -o yaml > role.yaml
[root@master ~]# vi role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: pod-reader
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list"]
[root@master ~]# kubectl create -f role.yaml
role.rbac.authorization.k8s.io/pod-reader created
# 【题目7】粗并发Job
在master节点/root目录下编写yaml文件perl-job.yaml,具体要求如下:
- Job名称:
myjob;
- 命名空间:
default;
- 镜像:
perl;
- 输出圆周率小数点后2000位:
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"];
- Pods运行成功次数:
10;
- Pod最大失败次数:
4
次; - 并行运行的Pod的个数:
2
。
完成后使用该yaml文件创建Job,并提交master节点的用户名、密码和IP到答题框。
[root@master ~]# vi perl-job.yaml
# Job "myjob": prints pi to 2000 digits; requires 10 successful completions,
# runs at most 2 Pods in parallel, gives up after 4 Pod failures.
apiVersion: batch/v1
kind: Job
metadata:
  name: myjob
spec:
  completions: 10      # total successful Pod runs required
  parallelism: 2       # max Pods running concurrently
  template:
    spec:
      containers:
      - name: myjob
        image: perl
        imagePullPolicy: IfNotPresent
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      # Never restart in place: a failed Pod counts toward backoffLimit.
      restartPolicy: Never
  backoffLimit: 4      # max failed Pods before the Job is marked failed
[root@master ~]# kubectl create -f perl-job.yaml
job.batch/myjob created
[root@master ~]# kubectl get job
NAME COMPLETIONS DURATION AGE
myjob 0/10 5m29s 5m29s
# 【题目8】PV
在master节点/root目录下编写yaml文件pv.yaml,具体要求如下:
- PV名称:
pv-local;
- 命名空间:
default;
- 回收策略:
Delete;
- 访问模式:
RWO;
- 挂载路径:node节点
/data/k8s/localpv;
- 卷容量:
5G。
[root@master ~]# vi pv.yaml
# PersistentVolume "pv-local": 5Gi local volume backed by /data/k8s/localpv
# on the "node" host, single-node read-write, deleted when released.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-local
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce            # RWO: mountable read-write by one node at a time
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /data/k8s/localpv
  # local volumes MUST declare nodeAffinity pinning the PV to the host
  # that actually owns the path.
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node
# 【题目9】ReplicaSet
在master节点/root目录下编写yaml文件replicaset.yaml,具体要求如下:
- Replicaset名称:
nginx;
- 命名空间:
default;
- 副本数:
3;
- 镜像:
nginx。
[root@master work]# cat replicaset.yaml
# ReplicaSet "nginx": keeps 3 nginx replicas running.
# Fixed: the original declared "apiVersion: v1" — ReplicaSet belongs to the
# apps API group, so "v1" fails with "no matches for kind ReplicaSet".
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
        ports:
        - containerPort: 80
[root@master ~]# kubectl create -f replicaset.yaml
replicaset.apps/nginx created
# 【题目10】HPA
在master节点/root目录下编写yaml文件hpa.yaml,具体要求如下:
- HPA名称:
frontend-scaler;
- 命名空间:
default;
- 副本数伸缩范围:
3--10;
- 期望每个Pod的CPU使用率为
50%
。
[root@master work]# kubectl autoscale replicaset nginx --max=10 --min=3 --cpu-percent=50 --name frontend-scaler --dry-run -o yaml >hpa.yaml
[root@master ~]# vi hpa.yaml
# HPA "frontend-scaler": scales the "nginx" ReplicaSet between 3 and 10
# replicas, targeting 50% average CPU utilization per Pod.
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: frontend-scaler
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: ReplicaSet
    # Fixed: the original said "frontend", but the ReplicaSet created in the
    # previous task (and targeted by the kubectl autoscale command) is "nginx".
    name: nginx
  minReplicas: 3
  maxReplicas: 10
  targetCPUUtilizationPercentage: 50
[root@master ~]# kubectl create -f hpa.yaml
horizontalpodautoscaler.autoscaling/frontend-scaler created
# 【题目11】Secrets
在master节点/root目录下编写yaml文件secret.yaml,具体要求如下:
- Secret名称:
mysecret;
- 命名空间:
default;
- 类型:
Opaque;
- username:
YWRtaW4=
; - password:
MWYyZDFlMmU2N2Rm。
[root@master ~]# kubectl create secret generic mysecret --type Opaque --from-literal=username=YWRtaW4= --from-literal=password=MWYyZDFlMmU2N2Rm --dry-run -o yaml > secret.yaml
[root@master ~]# vi secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: mysecret
type: Opaque
data:
username: YWRtaW4=
password: MWYyZDFlMmU2N2Rm
[root@master ~]# kubectl create -f secret.yaml
secret/mysecret created
# 【题目12】NetworkPolicy
在master节点/root目录下编写yaml文件network-policy-deny.yaml,具体要求如下:
- NetworkPolicy名称:
default-deny;
- 命名空间:
default;
- 默认禁止所有入Pod流量。
[root@master ~]# vi network-policy-deny.yaml
# NetworkPolicy "default-deny": blocks ALL ingress traffic to every Pod in
# the default namespace.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny
  namespace: default
spec:
  podSelector: {}      # empty selector: applies to every Pod in the namespace
  policyTypes:
  - Ingress            # Ingress listed with NO ingress rules => deny all inbound
  # Fixed: the original added "ingress: - from: - podSelector: {}", which is an
  # explicit allow-from-all-Pods rule and defeats the deny-all requirement.
[root@master ~]# kubectl create -f network-policy-deny.yaml
networkpolicy.networking.k8s.io/default-deny created
# 【题目13】健康检查
在master节点/root目录下编写yaml文件liveness_httpget.yaml,具体要求如下:
- Pod名称:
liveness-http;
- 命名空间:
default;
- 镜像:
nginx
;端口:80;
- 容器启动时运行命令“
echo Healty > /usr/share/nginx/html/healthz
”; - httpGet请求的资源路径为
/healthz
,地址默认为Pod IP,端口使用容器中定义的端口名称HTTP
; - 启动后延时30秒开始运行检测;
- 每隔3秒执行一次liveness probe。
[root@master ~]# vi liveness_httpget.yaml
apiVersion: v1
kind: Pod
metadata:
labels:
test: liveness
name: liveness-http
spec:
containers:
- name: livenessdemo
image: nginx
ports:
- name: http
containerPort: 80
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- 'echo Healty > /usr/share/nginx/html/healthz'
livenessProbe:
httpGet:
path: /healthz
port: http
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 3
[root@master ~]# kubectl create -f liveness_httpget.yaml
pod/liveness-http created
# 【题目14】Volume
在master节点/root目录下编写yaml文件emptydir.yaml,具体要求如下:
- Pod名称:
pod-emptydir;
- 命名空间:
default;
- 镜像:
nginx;
- Volume类型:
emptyDir
;名称data-volume;
- 挂载路径:
/data。
[root@master ~]# vi emptydir.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-emptydir
spec:
volumes:
- name: data-volume
emptyDir: {}
containers:
- image: nginx
name: test-container
volumeMounts:
- mountPath: /data
name: data-volume
[root@master ~]# kubectl create -f emptydir.yaml
pod/pod-emptydir created
# 【题目15】ResourceQuota
创建命名空间quota-example,在master节点/root目录下编写yaml文件quota.yaml,具体要求如下:
- ResourceQuota名称:
compute-resources;
- 命名空间:
quota-example;
- 命名空间内所有Pod数量不超过
4;
- 命名空间内所有容器内存申请总和不得超过
1G;
- 命名空间内所有内存限制不得超过
2G;
- 命名空间内所有容器申请的CPU不得超过
1;
- 命名空间内所有容器限制的CPU不得超过
2。
[root@master work]# kubectl create quota compute-resources --namespace quota-example --hard pods=4,requests.cpu=1,limits.cpu=2,requests.memory=1Gi,limits.memory=2Gi --dry-run -o yaml > quota.yaml
[root@master ~]# kubectl create namespace quota-example
namespace/quota-example created
[root@master ~]# vi quota.yaml
apiVersion: v1
kind: ResourceQuota
metadata:
name: compute-resources
namespace: quota-example
spec:
hard:
pods: "4"
requests.cpu: "1"
requests.memory: 1Gi
limits.cpu: "2"
limits.memory: 2Gi
[root@master ~]# kubectl create -f quota.yaml
resourcequota/compute-resources created
# 【题目16】DaemonSet
在master节点/root目录下编写yaml文件daemonset.yaml,具体要求如下:
- DaemonSet名称:
fluentd;
- 命名空间:
default;
- 镜像:
quay.io/fluentd_elasticsearch/fluentd:v2.5.2;
- 容器名称:
fluentd-container01;
- 将Pod调度到非污点的节点上。
[root@master ~]# vi daemonset.yaml
# DaemonSet "fluentd": one fluentd Pod per eligible node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  labels:
    app: fluentd-ds
spec:
  selector:
    matchLabels:
      name: fluentd-pod
  template:
    metadata:
      labels:
        name: fluentd-pod
    spec:
      # Task requirement: schedule Pods ONLY onto un-tainted nodes, so no
      # tolerations are declared. (The original tolerated the
      # node-role.kubernetes.io/master:NoSchedule taint, which would place
      # Pods on the tainted master — the opposite of what was asked.)
      containers:
      - name: fluentd-container01
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
      terminationGracePeriodSeconds: 30
[root@master ~]# kubectl create -f daemonset.yaml
daemonset.apps/fluentd created
# 【题目17】Pod安全策略
在master节点/root目录下编写yaml文件policy.yaml,具体要求如下:
- 安全策略名称:
pod-policy;
- 仅禁止创建特权模式的
Pod;
- 其它所有字段都被允许。
[root@master ~]# vi policy.yaml
# PodSecurityPolicy "pod-policy": forbids privileged Pods only; every other
# control is left wide open (RunAsAny rules, all volume types).
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: pod-policy
spec:
  privileged: false  # Don't allow privileged pods!
  # The rest fills in some required fields.
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:
  - '*'
[root@master ~]# kubectl create -f policy.yaml
podsecuritypolicy.policy/pod-policy created
# 【题目18】CronJob
在master节点/root目录下编写yaml文件date.yaml,具体要求如下:
- CronJob名称:
date;
- 命名空间:
default;
- 基于时间的调度规则:
每分钟启动一次;
- 容器名称:
hello;
镜像:busybox。
[root@master ~]# kubectl create cronjob date --image=busybox --schedule='*/1 * * * *' --dry-run -o yaml > date.yaml
[root@k8s-master ~]# vi date.yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: date
spec:
schedule: "*/1 * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: hello
image: busybox
command: ["echo","hellow k8s job!"]
restartPolicy: OnFailure
[root@master ~]# kubectl create -f date.yaml
cronjob.batch/date created
# 【题目19】RBAC
在master节点/root目录下编写yaml文件clusterrolebinding.yaml,具体要求如下:
- ClusterRoleBinding名称:
read-secrets-global;
- 绑定集群角色secret-reader,允许“
manager
”组中的任何用户读取
任意命名空间中secrets。
[root@master ~]# kubectl create clusterrolebinding read-secrets-global --clusterrole=secret-reader --group=manager -o yaml --dry-run > clusterrolebinding.yaml
[root@master ~]# vi clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: read-secrets-global
subjects:
- kind: Group
name: manager
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: secret-reader
apiGroup: rbac.authorization.k8s.io
[root@master ~]# kubectl apply -f clusterrolebinding.yaml
clusterrolebinding.rbac.authorization.k8s.io/read-secrets-global created
# 【题目20】HPA
在master节点/root目录下编写yaml文件deployment-hpa.yaml,具体要求如下:
- HPA名称:
deployment-hpa;
- 命名空间:
default;
- 基于deployment进行伸缩,副本数伸缩范围:
1--10;
- 期望每个Pod的CPU和内存使用率为
50%。
[root@master ~]# vi deployment-hpa.yaml
# HPA "deployment-hpa": scales a Deployment between 1 and 10 replicas at
# 50% target CPU utilization.
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: deployment-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    # Fixed: the original said "kind: ReplicaSet", but the task explicitly
    # requires scaling a Deployment.
    kind: Deployment
    name: deployment
  minReplicas: 1
  maxReplicas: 10
  # NOTE: autoscaling/v1 only supports a CPU target; a memory target would
  # require the autoscaling/v2 (or v2beta2) API.
  targetCPUUtilizationPercentage: 50
[root@master ~]# kubectl create -f deployment-hpa.yaml
# 【题目21】健康检查
在master节点/root目录下编写yaml文件liveness_tcp.yaml,具体要求如下:
- Pod名称:
liveness-tcp;
- 命名空间:
default;
- 镜像:nginx;端口:
80;
- 使用liveness-tcp方式向Pod IP的80/tcp端口发起连接请求;
- 启动后延时30秒开始运行检测;
- 每隔3秒执行一次liveness probe。
[root@master ~]# vi liveness_tcp.yaml
# Pod "liveness-tcp": nginx with a TCP-connect liveness probe on port 80.
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-tcp
spec:
  containers:
  - name: liveness-tcp-demo
    image: nginx
    ports:
    - name: http
      containerPort: 80
    # Fixed: the task requires a TCP connection probe against the Pod's
    # 80/tcp port; the original used an httpGet probe on /healthz, which is
    # a different probe type (and /healthz does not exist in stock nginx).
    livenessProbe:
      tcpSocket:
        port: 80
      initialDelaySeconds: 30   # wait 30s after start before the first probe
      periodSeconds: 3          # probe every 3 seconds
[root@master ~]# kubectl create -f liveness_tcp.yaml
# 【题目22】LimitRange
在master节点/root目录下编写yaml文件mem-limit-range.yaml,具体要求如下:
- LimitRange名称:
mem-limit-range;
- 命名空间:
default;
- 默认容器能使用资源的最大值为
512Mi内存、2000m CPU;
- 默认容器的资源请求为
256Mi内存、500m CPU;
- 请求上限为
800Mi内存、3000m CPU;
- 请求下限为
100Mi内存,300m CPU;
- 内存和CPU超售的比率均为
2。
apiVersion: v1
kind: LimitRange
metadata:
name: mem-limit-range
spec:
limits:
- max:
cpu: "3000m"
memory: "800Mi"
min:
cpu: "300m"
memory: "100Mi"
default:
cpu: "2000m"
memory: "512Mi"
defaultRequest:
cpu: "500m"
memory: "256Mi"
maxLimitRequestRatio:
cpu: 2
memory: 2
type: Container
# 【题目23】nodeSelector
为node节点打上标签“disktype=ssd”,然后在master节点/root目录下编写yaml文件nodeselector.yaml,具体要求如下:
- Pod名称:
nginx-nodeselector;
- 命名空间:
default;
- 镜像:
nginx;
- 调度该Pod到标签为“
disktype=ssd
”的节点上。
apiVersion: v1
kind: Pod
metadata:
name: nginx-nodeselector
spec:
containers:
- name: nginx
image: nginx
imagePullPolicy: IfNotPresent
nodeSelector:
disktype: ssd
# 【题目24】亲和性
为master节点打上标签“disktype=ssd”和标签“exam=chinaskill”,然后在master节点/root目录下编写yaml文件affinity.yaml,具体要求如下:
- Deployment名称:
nginx-deployment;
- 命名空间:
default;
- 镜像:
nginx:latest;
- Pod只能调度到具有标签“
disktype=ssd
”的节点上; - 具有标签“exam=chinaskill”的节点优先被调度。
[root@master work]# cat qhx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 1
selector:
matchLabels:
app: nginx-deployment
template:
metadata:
labels:
app: nginx-deployment
spec:
containers:
- image: nginx
name: nginx
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: disktype
operator: In
values:
- ssd
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: exam
operator: In
values:
- chinaskill
# 【题目25】NetworkPolicy
在master节点/root目录下编写yaml文件network-policy-allow.yaml,具体要求如下:
- NetworkPolicy名称:
allow-all;
- 命名空间:
default;
- 默认允许所有入Pod的流量。
[root@master ~]# vi network-policy-allow.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-all
spec:
podSelector: {}
ingress:
- {}
[root@master ~]# kubectl create -f network-policy-allow.yaml
# 【题目26】Volume
在master节点/root目录下编写yaml文件hostpath.yaml,具体要求如下:
- Pod名称:
hostpath;
- 命名空间:
default;
- 镜像:
nginx;
- Volume类型:
hostPath;
名称test-volume;
- 容器挂载路径:
/hostpath;
宿主机挂载路径:/data。
apiVersion: v1
kind: Pod
metadata:
name: hostpath
spec:
volumes:
- name: test-volume
hostPath:
path: /data
containers:
- image: nginx
name: test-container
volumeMounts:
- mountPath: /hostpath
name: test-volume
# 【题目27】PV
在master节点编写/root/pv.yaml文件完成PV的创建,要求如下:
- 名字:
pv-exam;
- 类型:
hostPath;
- 路径:
/data;
- 大小:
1G;
- 回收策略:
Recycle;
- 模式:
ReadWriteMany。
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-exam
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Recycle
hostPath:
path: /data
# 【题目28】资源配额
在master节点/root目录下编写Pod的yaml文件memory-request-limit.yaml,要求如下:
- 命名空间:
default;
- Pod名称:
memory-demo;
- 镜像:
nginx:latest;
镜像拉取策略:IfNotPresent;
- 容器名称:
memory-demo;
- 内存请求为100MiB,并被限制在200MiB以内。
apiVersion: v1
kind: Pod
metadata:
name: memory-demo
namespace: default
spec:
containers:
- name: memory-demo
image: nginx
imagePullPolicy: IfNotPresent
resources:
limits:
memory: "200Mi"
requests:
memory: "100Mi"
# 【题目29】服务部署
基于Kubernetes编排部署WordPress,具体要求如下:
- 在master节点部署NFS服务,做共享存储;
- 镜像:
mysql:5.6、wordpress:latest;
- 创建PVC
mysql-pv-claim
,绑定PVmysql-persistent-storage
,PV大小:20Gi,
权限:RWO
,回收策略:Recycle
; - 创建PVC
wp-pv-claim
,绑定PVwordpress-persistent-storage
,PV大小:20Gi
,权限:RWO
,回收策略:Recycle
; - wordpress以NodePort方式对外暴露端口
31000
。
apiVersion: v1
kind: PersistentVolume
metadata:
name: mysql-persistent-storage
spec:
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Recycle
storageClassName: nfs
nfs:
path: /ptsnbnb2019
server: 192.168.200.10
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: wordpress-persistent-storage
spec:
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Recycle
storageClassName: nfs
nfs:
path: /ptsnbnb2019
server: 192.168.200.10
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mysql-pv-claim
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: wp-pv-claim
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs
resources:
requests:
storage: 20Gi
---
kind: Pod
apiVersion: v1
metadata:
name: wordpress
labels:
name: wordpress
spec:
containers:
- name: wordpress
image: 192.168.200.10/library/wordpress
ports:
- containerPort: 80
---
kind: Service
apiVersion: v1
metadata:
name: wordpress
spec:
type: NodePort
ports:
- port: 80
nodePort: 31000
selector:
name: wordpress
# 高级运维
# 【题目 1】健康检查--ExecAction
在 master 节点/root 目录下编写 yaml 文件 liveness_exec.yaml,具体要求如下:
- Pod 名称:
liveness-exec;
- 命名空间:
default;
- 镜像:
busybox;
容器名称:liveness;
- 容器启动时运行命令“
touch /tmp/healthy; sleep 60; rm -rf /tmp/healthy; sleep 600
”, 此命令在容器启动时创建/tmp/healthy 文件,并于 60 秒之后将其删除; - 存活性探针运行“
test -e /tmp/healthy
”命令检查文件的存在性,若文件存在则返 回状态码为 0,表示成功通过测试; - 启动后延时
5
秒开始运行检测; - 每隔
5
秒执行一次 liveness probe。
[root@master work]# cat liveness_exec.yaml
apiVersion: v1
kind: Pod
metadata:
name: liveness-exec
spec:
containers:
- image: 172.25.253.14/library/busybox
name: liveness-exec
imagePullPolicy: IfNotPresent
args:
- /bin/sh
- -c
- touch /tmp/healthy; sleep 60; rm -rf /tmp/healthy;sleep 600
livenessProbe:
exec:
command:
- test
- -e
- /tmp/healthy
initialDelaySeconds: 5
periodSeconds: 5
# 【题目 2】更新证书
Kubernetes 默认的证书有效期只有一年时间,对于某些场景下一个足够长的证书有效期 是非常有必要的。请将 Kubernetes 集群证书的有效期延长至 10 年。(需要用到的软件包 kubernetes_v1.18.1.tar.gz 在 http 服务下)
1.下载包
$ wget https://github.com/kubernetes/kubernetes/archive/v1.18.1.tar.gz
2.安装go
$ tar -zxf go1.14.6.linux-amd64.tar.gz -C /usr/local/
$ vi /etc/profile
export PATH=$PATH:/usr/local/go/bin
3.改配置文件
$ cd /srv/kubernetes/cmd/kubeadm/app/constants
$ vim constants.go
CertificateValidity = time.Hour * 24 * 365 * 10
4.编译
[root@master kubernetes]$ yum -y install make gcc rsync
[root@master kubernetes]$ make WHAT=cmd/kubeadm/ GOFLAGS=-v
5.替换编译后的kubeadm
$ cd /srv/kubernetes/_output/local/bin/linux/amd64
[root@master amd64]$ cp /usr/bin/kubeadm /usr/bin/kubeadm.bak
[root@master amd64]$ cp kubeadm /usr/bin/
cp: overwrite ‘/usr/bin/kubeadm’? y
6.查看一下当前证书年限
[root@master amd64]$ kubeadm alpha certs check-expiration
[root@master amd64]$ kubeadm alpha certs renew all
[root@master amd64]$ kubeadm alpha certs check-expiration
[check-expiration] Reading configuration from the cluster...
[check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
CERTIFICATE EXPIRES RESIDUAL TIME CERTIFICATE AUTHORITY EXTERNALLY MANAGED
admin.conf Dec 17, 2031 03:31 UTC 9y no
apiserver Dec 17, 2031 03:31 UTC 9y ca no
apiserver-etcd-client Dec 17, 2031 03:31 UTC 9y etcd-ca no
apiserver-kubelet-client Dec 17, 2031 03:31 UTC 9y ca no
controller-manager.conf Dec 17, 2031 03:31 UTC 9y no
etcd-healthcheck-client Dec 17, 2031 03:31 UTC 9y etcd-ca no
etcd-peer Dec 17, 2031 03:31 UTC 9y etcd-ca no
etcd-server Dec 17, 2031 03:31 UTC 9y etcd-ca no
front-proxy-client Dec 17, 2031 03:31 UTC 9y front-proxy-ca no
scheduler.conf Dec 17, 2031 03:31 UTC 9y no
CERTIFICATE AUTHORITY EXPIRES RESIDUAL TIME EXTERNALLY MANAGED
ca Dec 17, 2031 03:20 UTC 9y no
etcd-ca Dec 17, 2031 03:20 UTC 9y no
front-proxy-ca Dec 17, 2031 03:20 UTC 9y no
[root@master amd64]$ kubeadm alpha certs check-expiration|grep 9y|awk '{print$1" "$7}'
admin.conf 9y
apiserver 9y
apiserver-etcd-client 9y
apiserver-kubelet-client 9y
controller-manager.conf 9y
etcd-healthcheck-client 9y
etcd-peer 9y
etcd-server 9y
front-proxy-client 9y
scheduler.conf 9y
ca 9y
etcd-ca 9y
front-proxy-ca 9y
# 【题目 3】自定义资源管理
在 Kubernetes 中一切都可视为资源,通过自定义资源我们可以向 Kubernetes API 中增 加新资源类型。在 master 节点/root 目录下编写 yaml 文件 resource.yaml,具体要求如下:
- 自定义资源名称:
crontabs.stable.example.com;
- 指定自定义资源作用范围为
命名空间;
- 指定每个版本都可以通过
served
标志来独立启用或禁止; - 指定其中有且只有一个版本必需被标记为存储版本
v1。
[root@master ~]# cat resource.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: crontabs.stable.example.com
spec:
group: stable.example.com
versions:
- name: v1
served: true
storage: true
scope: Namespaced
names:
plural: crontabs
singular: crontab
kind: CronTab
shortNames:
- ct
[root@master ~]# kubectl apply -f resource.yaml
Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition
customresourcedefinition.apiextensions.k8s.io/crontabs.stable.example.com created
# 【题目 4】HPA管理
在 master 节点/root 目录下编写 yaml 文件 deployment-hpa.yaml,具体要求如下:
- HPA 名称:
deployment-hpa;
- 命名空间:
default;
- 基于 deployment 进行伸缩,副本数伸缩范围:
1--10;
- 期望每个 Pod 的 CPU和内存使用率为
50%
[root@master work]# kubectl create deployment nginx --image=172.25.253.14/library/nginx
deployment.apps/nginx created
[root@master work]# kubectl autoscale deployment nginx --max=10 --min=1 --cpu-percent=50 --name=deployment-hpa --dry-run -o yaml > deployment-hpa.yaml
[root@master work]# cat deployment-hpa.yaml
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
creationTimestamp: null
name: deployment-hpa
spec:
maxReplicas: 10
minReplicas: 1
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: nginx
targetCPUUtilizationPercentage: 50
status:
currentReplicas: 0
desiredReplicas: 0
[root@master work]# kubectl apply -f deployment-hpa.yaml
horizontalpodautoscaler.autoscaling/deployment-hpa created
[root@master work]# kubectl get hpa
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
deployment-hpa Deployment/nginx <unknown>/50% 1 10 0 3s
# 【题目 5】NetworkPolicy管理
在 master 节点/root 目录下编写 yaml 文件 network-policy-deny.yaml,具体要求如下:
- NetworkPolicy 名称:
default-deny;
- 命名空间:
default;
- 默认禁止所有入 Pod
[root@master work]# cat network-policy-deny.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny
spec:
podSelector: {}
policyTypes:
- Ingress
[root@master work]# kubectl apply -f network-policy-deny.yaml
networkpolicy.networking.k8s.io/default-deny created
[root@master work]# kubectl get networkpolicies.
NAME POD-SELECTOR AGE
default-deny <none> 7s
# 【题目 6】修改Pod数量限制
Kubernetes 默认每个节点只能启动 110 个 Pod,由于业务需要,将每个节点默认限制的 Pod 数量改为 200。
$ echo maxPods: 200 >> /var/lib/kubelet/config.yaml
$ systemctl restart kubelet
$ kubectl describe node master node|grep -w pods|grep 200
pods: 200
pods: 200
pods: 200
pods: 200
# 【题目 7】修改NodePort端口范围
Kubernetes 以 NodePort 方式暴露服务,默认的端口范围为 30000-32767,将 NodePort 的端口范围修改为 20000-65535。
- 修改kube-apiserver.yaml
[root@master ~]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
···
- --secure-port=6443
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --service-cluster-ip-range=10.96.0.0/12
- --service-node-port-range=20000-65535 #新增这一行配置
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart kubelet
- 重启apiserver
# 获得 apiserver 的 pod 名字
[root@master ~]# export apiserver_pods=$(kubectl get pods --selector=component=kube-apiserver -n kube-system --output=jsonpath={.items..metadata.name})
# 删除 apiserver 的 pod
[root@master ~]# kubectl delete pod $apiserver_pods -n kube-system
pod "kube-apiserver-master" deleted
# 验证结果
[root@master ~]# kubectl describe pod $apiserver_pods -n kube-system
···
--secure-port=6443
--service-account-key-file=/etc/kubernetes/pki/sa.pub
--service-cluster-ip-range=10.96.0.0/12
--service-node-port-range=20000-65535 # 注意查看是否有这一行
--tls-cert-file=/etc/kubernetes/pki/apiserver.crt
--tls-private-key-file=/etc/kubernetes/pki/apiserver.key
# 【题目 8】污点与容忍
请将 master 节点设置为污点,策略设置为资源尽量不调度到污点节点中去。
$ kubectl taint node master master=master:PreferNoSchedule
其中[effect] 可取值: [ NoSchedule | PreferNoSchedule | NoExecute ]
NoSchedule: 一定不能被调度
PreferNoSchedule: 尽量不要调度
NoExecute: 不仅不会调度, 还会驱逐Node上已有的Pod
# 金丝雀发布
# 【题目1】金丝雀发布-安装
由于 Kubernetes 使用的 IPVS 模块需要系统内核版本支持,试使用提供的软件包 (Canary_v1.0.tar.gz 在 http 服务下)将系统内核进行升级,在 Kubernetes 集群上完成 Istio 的安装,并将 default Namespace 设置自动注入。
$ yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y
$ yum --enablerepo="elrepo-kernel" -y install kernel-ml.x86_64
$ grub2-set-default 0
$ grub2-mkconfig -o /boot/grub2/grub.cfg #重启电脑
$ tar -zxvf istio-1.9.5-linux-amd64.tar.gz
$ cd istio-1.9.5/
$ cp bin/istioctl /usr/local/bin/
$ kubectl create ns istio-system
$ istioctl install --set profile=demo -y
✔ Istio core installed
✔ Istiod installed
✔ Egress gateways installed
✔ Ingress gateways installed
✔ Installation complete
$ kubectl label namespace default istio-injection=enabled
使用检测命令检查:
$ kubectl -n istio-system get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
istio-egressgateway 1/1 1 1 25s
istio-ingressgateway 1/1 1 1 24s
istiod 1/1 1 1 20s
$ kubectl get ns --show-labels=true|grep default
default Active 121m istio-injection=enabled
$ uname -r
5.12.0-1.el7.elrepo.x86_64
# 【题目2】金丝雀发布-流量控制
使用赛项提供的文件(istio-1.9.5/samples/helloworld/helloworld.yaml)在 default 命名空间 下完成 helloworld 服务的部署,然后设置路由规则来控制流量分配,创建一个虚拟服务 helloworld;再创建一个目标规则 helloworld,将 10%的流量发送到金丝雀版本(v2)。
$ kubectl apply -f samples/helloworld/helloworld.yaml
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
helloworld-v1-776f57d5f6-m5xqg 2/2 Running 0 100m
helloworld-v2-54df5f84b-mwsf6 2/2 Running 0 100m
$ cat helloworld-gateway.yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: helloworld
spec:
selector:
istio: ingressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: helloworld
spec:
hosts:
- "*"
gateways:
- helloworld
http:
- match:
- uri:
exact: /hello
route:
- destination:
host: helloworld
subset: v1
weight: 90
- destination:
host: helloworld
subset: v2
weight: 10
$ cat DestinationRule.yaml
# DestinationRule "helloworld": defines the v1/v2 subsets the VirtualService
# splits traffic across (90/10).
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  # Fixed: the original used the typo "helloword"; the task names the rule
  # "helloworld".
  name: helloworld
spec:
  # Fixed: host must be "helloworld" — the Service the VirtualService routes
  # to. With the typo'd host, the v1/v2 subsets never applied.
  host: helloworld
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
使用检测命令检查
$ kubectl get VirtualService,DestinationRule
NAME GATEWAYS HOSTS AGE
virtualservice.networking.istio.io/helloworld ["helloworld"] ["*"] 95m
NAME HOST AGE
destinationrule.networking.istio.io/helloworld helloworld 57m
$ kubectl describe VirtualService helloworld |grep Route -A 10 | xargs
Route: Destination: Host: helloworld Subset: v1 Weight: 90 Destination: Host: helloworld Subset: v2 Weight: 10 Events: <none>
# 【题目3】金丝雀发布-熔断
使用赛项提供的文件(istio-1.9.5/samples/httpbin/httpbin-fortio.yaml)在 default 命名空间下 完成 httpbin 服务的部署,创建一个目标规则 httpbin,在调用 httpbin 服务时应用熔断设置, 具体要求为
- 定义到目标主机的 HTTP1/TCP 最大连接数为
1;
- 定义针对一个目标的 HTTP 请求的最大排队数量为
1;
- 定义对某一后端的请求中,一个连接内能够发出的最大请求数量为
1。
$ kubectl apply -f samples/httpbin/sample-client/fortio-deploy.yaml
service/fortio created
deployment.apps/fortio-deploy created
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
fortio-deploy-576dbdfbc4-rdtrd 2/2 Running 0 2m28s
$ kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: httpbin
spec:
host: httpbin
trafficPolicy:
connectionPool:
tcp:
maxConnections: 1
http:
http1MaxPendingRequests: 1
maxRequestsPerConnection: 1
outlierDetection:
consecutive5xxErrors: 1
interval: 1s
baseEjectionTime: 3m
maxEjectionPercent: 100
EOF
destinationrule.networking.istio.io/httpbin created
使用检测命令检查
$ kubectl describe DestinationRule httpbin
···
Spec:
Host: httpbin
Traffic Policy:
Connection Pool:
Http:
http1MaxPendingRequests: 1
Max Requests Per Connection: 1
Tcp:
Max Connections: 1
Outlier Detection:
Base Ejection Time: 3m
consecutive5xxErrors: 1
Interval: 1s
Max Ejection Percent: 100
Events: <none>
# 【题目 4】金丝雀发布-流量镜像
使用赛项提供的文件(istio-1.9.5/samples/httpbin/httpbin-sleep.yaml)在 default 命名空间下 完成 httpbin-v1、httpbin-v2 以及 sleep 服务的部署,创建一个虚拟服务 httpbin 和一个目标规 则 httpbin,将所有流量路由到 httpbin-v1 服务,然后将 100%的相同流量镜像(即发送)到 httpbin-v2 服务。
这是httpbin.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: httpbin-v1
spec:
replicas: 1
selector:
matchLabels:
app: httpbin
version: v1
template:
metadata:
labels:
app: httpbin
version: v1
spec:
containers:
- image: docker.io/kennethreitz/httpbin
imagePullPolicy: IfNotPresent
name: httpbin
command: ["gunicorn", "--access-logfile", "-", "-b", "0.0.0.0:80", "httpbin:app"]
ports:
- containerPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: httpbin-v2
spec:
replicas: 1
selector:
matchLabels:
app: httpbin
version: v2
template:
metadata:
labels:
app: httpbin
version: v2
spec:
containers:
- image: docker.io/kennethreitz/httpbin
imagePullPolicy: IfNotPresent
name: httpbin
command: ["gunicorn", "--access-logfile", "-", "-b", "0.0.0.0:80", "httpbin:app"]
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: httpbin
labels:
app: httpbin
spec:
ports:
- name: http
port: 8000
targetPort: 80
selector:
app: httpbin
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: sleep
spec:
replicas: 1
selector:
matchLabels:
app: sleep
template:
metadata:
labels:
app: sleep
spec:
containers:
- name: sleep
image: curlimages/curl
command: ["/bin/sleep","3650d"]
imagePullPolicy: IfNotPresent
如下是解答
$ kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: httpbin
spec:
hosts:
- httpbin
http:
- route:
- destination:
host: httpbin
subset: v1
weight: 100
mirror:
host: httpbin
subset: v2
mirrorPercentage:
value: 100.0
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: httpbin
spec:
host: httpbin
subsets:
- name: v1
labels:
version: v1
- name: v2
labels:
version: v2
EOF
使用检测命令检查
$ kubectl describe virtualservice httpbin | grep Mirror -A 2
Mirror:
Host: httpbin
Subset: v2
Mirror Percentage:
Value: 100
Route:
# 【题目 5】金丝雀发布-Ingress Gateway
使用赛项提供的文件(istio-1.9.5/samples/httpbin/httpbin.yaml)在 default 命名空间下完成 httpbin 服务的部署,在 80 端口为 HTTP 流量配置一个网关 httpbin-gateway,并为 httpbin 服 务创建了虚拟服务配置 httpbin,包含 1 个路由规则,允许流量流向路径/headers,并允许通 过浏览器访问服务。
$ kubectl apply -f samples/httpbin/httpbin.yaml
serviceaccount/httpbin created
service/httpbin created
deployment.apps/httpbin created
$ kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: httpbin-gateway
spec:
selector:
istio: ingressgateway #使用Istio的默认网关
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: httpbin
spec:
hosts:
- "*"
gateways:
- httpbin-gateway
http:
- match:
- uri:
prefix: /headers
route:
- destination:
port:
number: 8000
host: httpbin
EOF
使用检测命令检查
$ kubectl get virtualservice
NAME GATEWAYS HOSTS AGE
httpbin ["httpbin-gateway"] ["*"] 6m51s
$ kubectl describe virtualservice httpbin
···
Spec:
Gateways:
httpbin-gateway
Hosts:
*
Http:
Match:
Uri:
Prefix: /headers
Route:
Destination:
Host: httpbin
Port:
Number: 8000
Events: <none>
$ curl -s -I -HHost:httpbin.example.com "http://node:$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')/headers"
HTTP/1.1 200 OK
server: istio-envoy
date: Thu, 16 Dec 2021 05:32:00 GMT
content-type: application/json
content-length: 627
access-control-allow-origin: *
access-control-allow-credentials: true
x-envoy-upstream-service-time: 23
``` -->