# kubernetes -- 05

## Deploying the Dashboard

Copy the kubernetes/v1.17.6/dashboard directory from the cloud drive to the master node.

#### Push the images to the private registry

```shell
# push the dashboard image
[root@master dashboard]# docker load -i dashboard.tar.gz
[root@master dashboard]# docker tag kubernetesui/dashboard:v2.0.0 192.168.1.100:5000/dashboard:v2.0.0
[root@master dashboard]# docker push 192.168.1.100:5000/dashboard:v2.0.0
# push the metrics-scraper image
[root@master dashboard]# docker load -i metrics-scraper.tar.gz
[root@master dashboard]# docker tag kubernetesui/metrics-scraper:v1.0.4 192.168.1.100:5000/metrics-scraper:v1.0.4
[root@master dashboard]# docker push 192.168.1.100:5000/metrics-scraper:v1.0.4
```

#### Install and publish the service

```yaml
[root@master dashboard]# vim recommended.yaml
# change line 190 to
image: 192.168.1.100:5000/dashboard:v2.0.0
# change line 274 to
image: 192.168.1.100:5000/metrics-scraper:v1.0.4
[root@master dashboard]# kubectl apply -f recommended.yaml
# ---------------------------------- verify -----------------------------------------
[root@master dashboard]# kubectl -n kubernetes-dashboard get pod
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-57bf85fcc9-vsz74   1/1     Running   0          52s
kubernetes-dashboard-7b7f78bcf9-5k8vq        1/1     Running   0          52s
[root@master dashboard]# kubectl -n kubernetes-dashboard get service
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)
dashboard-metrics-scraper   ClusterIP   10.254.76.85     <none>        8000/TCP
kubernetes-dashboard        ClusterIP   10.254.211.125   <none>        443/TCP
# ---------------------------------- expose the service externally ------------------
[root@master dashboard]# vim service.yaml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      nodePort: 30443        # newly added
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort             # newly added
[root@master dashboard]# kubectl apply -f service.yaml
service/kubernetes-dashboard configured
[root@master dashboard]# kubectl -n kubernetes-dashboard get service
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.254.66.25     <none>        8000/TCP        2m6s
kubernetes-dashboard        NodePort    10.254.165.155   <none>        443:30443/TCP   2m6s
[root@master dashboard]#
```

On Huawei Cloud, bind an elastic public IP to a node and open [ <font color=#ff0000>https://&lt;elastic-public-IP&gt;:30443/</font> ].

#### Token-based login

```shell
[root@master dashboard]# kubectl apply -f admin-token.yaml
[root@master ~]# kubectl -n kubernetes-dashboard get secrets
NAME                     TYPE                                  DATA   AGE
admin-user-token-bxjlz   kubernetes.io/service-account-token   3      23s
[root@master ~]# kubectl -n kubernetes-dashboard describe secrets admin-user-token-bxjlz
Name:         admin-user-token-bxjlz
... ...
ca.crt:     1025 bytes
namespace:  20 bytes
token:      <the very long string here is the authentication token you are looking for>
```

Log in with the token obtained above and access the Dashboard through the web page.
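If you prefer not to copy the token by hand out of the `describe` output, it can be read straight from the secret. A minimal sketch, assuming the secret name `admin-user-token-bxjlz` shown above (the random suffix will differ on your cluster):

```shell
# The token is stored base64-encoded in .data.token of the secret;
# decode it and paste the result into the Dashboard login page.
kubectl -n kubernetes-dashboard get secret admin-user-token-bxjlz \
    -o jsonpath='{.data.token}' | base64 -d; echo
```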
## Deploying Prometheus

#### Import the images

Import every image under kubernetes/v1.17.6/prometheus/images/ into the private registry. First copy all of the images into the images directory on the master.

```shell
[root@master images]# for i in *.gz;do docker load -i ${i};done
[root@master images]# img="prom/node-exporter v1.0.0
quay.io/coreos/prometheus-config-reloader v0.35.1
quay.io/coreos/prometheus-operator v0.35.1
quay.io/coreos/kube-state-metrics v1.9.2
grafana/grafana 6.4.3
jimmidyson/configmap-reload v0.3.0
quay.io/prometheus/prometheus v2.11.0
quay.io/prometheus/alertmanager v0.18.0
quay.io/coreos/k8s-prometheus-adapter-amd64 v0.5.0
quay.io/coreos/kube-rbac-proxy v0.4.1"
[root@master images]# while read _f _v;do
    docker tag ${_f}:${_v} 192.168.1.100:5000/${_f##*/}:${_v}
    docker push 192.168.1.100:5000/${_f##*/}:${_v}
    docker rmi ${_f}:${_v}
done <<<"${img}"
[root@master images]# curl http://192.168.1.100:5000/v2/_catalog
{"repositories":["alertmanager","configmap-reload","coredns","dashboard","etcd","flannel","grafana","k8s-prometheus-adapter-amd64","kube-apiserver","kube-controller-manager","kube-proxy","kube-rbac-proxy","kube-scheduler","kube-state-metrics","metrics-scraper","metrics-server","myos","nginx-ingress-controller","node-exporter","pause","prometheus","prometheus-config-reloader","prometheus-operator"]}
```

#### Install the operator

Copy the prometheus/setup directory to the master.

```yaml
[root@master prometheus]# curl http://192.168.1.100:5000/v2/configmap-reload/tags/list
{"name":"configmap-reload","tags":["v0.3.0"]}
[root@master prometheus]# curl http://192.168.1.100:5000/v2/prometheus-config-reloader/tags/list
{"name":"prometheus-config-reloader","tags":["v0.35.1"]}
[root@master prometheus]# curl http://192.168.1.100:5000/v2/prometheus-operator/tags/list
{"name":"prometheus-operator","tags":["v0.35.1"]}
[root@master prometheus]# vim setup/prometheus-operator-deployment.yaml
27: - --config-reloader-image=192.168.1.100:5000/configmap-reload:v0.3.0
28: - --prometheus-config-reloader=192.168.1.100:5000/prometheus-config-reloader:v0.35.1
29: image: 192.168.1.100:5000/prometheus-operator:v0.35.1
# verify the installation
[root@master prometheus]# kubectl apply -f setup/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
prometheus-operator-75b4b59b74-72qhg   1/1     Running   0          47s
```

#### Install the Prometheus server

Copy the prometheus/prom-server directory to the master.

```yaml
[root@master prometheus]# curl http://192.168.1.100:5000/v2/prometheus/tags/list
{"name":"prometheus","tags":["v2.11.0"]}
[root@master prometheus]# vim prom-server/prometheus-prometheus.yaml
14: baseImage: 192.168.1.100:5000/prometheus
34: version: v2.11.0
[root@master prometheus]# kubectl apply -f prom-server/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
prometheus-k8s-0                       3/3     Running   1          45s
prometheus-k8s-1                       3/3     Running   1          45s
```

#### Install prom-adapter

Copy the prometheus/prom-adapter directory to the master.

```yaml
[root@master prometheus]# curl http://192.168.1.100:5000/v2/k8s-prometheus-adapter-amd64/tags/list
{"name":"k8s-prometheus-adapter-amd64","tags":["v0.5.0"]}
[root@master prometheus]# vim prom-adapter/prometheus-adapter-deployment.yaml
28: image: 192.168.1.100:5000/k8s-prometheus-adapter-amd64:v0.5.0
[root@master prometheus]# kubectl apply -f prom-adapter
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
prometheus-adapter-856854f9f6-knqtq    1/1     Running   0          6s
```
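A quick sanity check that the adapter is wired into the API aggregation layer is to list the metrics-related APIService objects and confirm they report Available. This is only a rough check; which API group the adapter serves in this lab (metrics.k8s.io or custom.metrics.k8s.io) depends on the manifests shipped in prom-adapter/, and `kubectl top` only works if the resource-metrics API is served (the metrics-server deployed in the earlier stage also provides it):

```shell
# List metrics-related APIService registrations and their availability
kubectl get apiservices | grep -i metrics

# If the resource-metrics API is available, per-node usage can be queried
kubectl top node
```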
#### Install metrics-state

Copy the prometheus/metrics-state directory to the master.

```yaml
[root@master prometheus]# curl http://192.168.1.100:5000/v2/kube-state-metrics/tags/list
{"name":"kube-state-metrics","tags":["v1.9.2"]}
[root@master prometheus]# curl http://192.168.1.100:5000/v2/kube-rbac-proxy/tags/list
{"name":"kube-rbac-proxy","tags":["v0.4.1"]}
[root@master prometheus]# vim metrics-state/kube-state-metrics-deployment.yaml
24: image: 192.168.1.100:5000/kube-rbac-proxy:v0.4.1
41: image: 192.168.1.100:5000/kube-rbac-proxy:v0.4.1
58: image: 192.168.1.100:5000/kube-state-metrics:v1.9.2
[root@master prometheus]# kubectl apply -f metrics-state/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
kube-state-metrics-5894f64799-krvn6    3/3     Running   0          4s
```

#### Install node-exporter

Copy the prometheus/node-exporter directory to the master.

```yaml
[root@master prometheus]# curl http://192.168.1.100:5000/v2/node-exporter/tags/list
{"name":"node-exporter","tags":["v1.0.0"]}
[root@master prometheus]# curl http://192.168.1.100:5000/v2/kube-rbac-proxy/tags/list
{"name":"kube-rbac-proxy","tags":["v0.4.1"]}
[root@master prometheus]# vim node-exporter/node-exporter-daemonset.yaml
27: image: 192.168.1.100:5000/node-exporter:v1.0.0
57: image: 192.168.1.100:5000/kube-rbac-proxy:v0.4.1
[root@master prometheus]# kubectl apply -f node-exporter/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
node-exporter-7h4l9                    2/2     Running   0          7s
node-exporter-7vxmx                    2/2     Running   0          7s
node-exporter-mr6lw                    2/2     Running   0          7s
node-exporter-zg2j8                    2/2     Running   0          7s
```

#### Install alertmanager

Copy the prometheus/alertmanager directory to the master.

```yaml
[root@master prometheus]# curl http://192.168.1.100:5000/v2/alertmanager/tags/list
{"name":"alertmanager","tags":["v0.18.0"]}
[root@master prometheus]# vim alertmanager/alertmanager-alertmanager.yaml
09: baseImage: 192.168.1.100:5000/alertmanager
18: version: v0.18.0
[root@master prometheus]# kubectl apply -f alertmanager/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
alertmanager-main-0                    2/2     Running   0          16s
alertmanager-main-1                    2/2     Running   0          16s
alertmanager-main-2                    2/2     Running   0          16s
```

#### Install grafana

Copy the prometheus/grafana directory to the master.

```yaml
[root@master prometheus]# curl http://192.168.1.100:5000/v2/grafana/tags/list
{"name":"grafana","tags":["6.4.3"]}
[root@master prometheus]# vim grafana/grafana-deployment.yaml
19: - image: 192.168.1.100:5000/grafana:6.4.3
[root@master prometheus]# kubectl apply -f grafana/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
grafana-647d948b69-d2hv9               1/1     Running   0          19s
```

#### Publish the service

Grafana service:

```yaml
[root@master prometheus]# cp grafana/grafana-service.yaml ./
[root@master prometheus]# vim grafana-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: grafana
  name: grafana
  namespace: monitoring
spec:
  type: NodePort             # newly added
  ports:
  - name: http
    port: 3000
    nodePort: 30000          # newly added
    targetPort: http
  selector:
    app: grafana
[root@master prometheus]# kubectl apply -f grafana-service.yaml
[root@master prometheus]# kubectl -n monitoring get service
NAME      TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)
grafana   NodePort   10.254.79.49   <none>        3000:30000/TCP
```

Once the service is published it can be reached directly through the Huawei Cloud elastic public IP. Grafana's default username/password for the first login is admin/admin.
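Before opening a browser, both NodePort services can be spot-checked from the command line. A small sketch, where `<elastic-public-IP>` is a placeholder for the elastic public IP bound to a node:

```shell
# Grafana exposes a health endpoint; "database": "ok" in the reply means the
# pod behind NodePort 30000 is up and serving requests
curl -s http://<elastic-public-IP>:30000/api/health

# The Dashboard from the previous section answers on NodePort 30443;
# -k skips verification of its self-signed certificate
curl -sk -o /dev/null -w '%{http_code}\n' https://<elastic-public-IP>:30443/
```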
## HPA cluster

#### Cluster diagram

![](/media/202204/Snip20220415_1_1649986057.png)

#### Lab steps

```yaml
[root@master ~]# vim myhpa.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myweb
spec:
  selector:
    matchLabels:
      app: apache
  replicas: 1
  template:
    metadata:
      labels:
        app: apache
    spec:
      containers:
      - name: apache
        image: 192.168.1.100:5000/myos:httpd
        ports:
        - containerPort: 80
        resources:
          requests:
            cpu: 200m
      restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  name: web-service
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    app: apache
  type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: my-app
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  backend:
    serviceName: web-service
    servicePort: 80
---
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: myweb
spec:
  minReplicas: 1
  maxReplicas: 3
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: myweb
  targetCPUUtilizationPercentage: 50
[root@master ~]# kubectl apply -f myhpa.yaml
[root@master ~]# kubectl get hpa
NAME    REFERENCE          TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
myweb   Deployment/myweb   0%/50%    1         3         1          15m
```

When a container's CPU usage exceeds 50%, one more Pod is added automatically, and scaling continues step by step up to the maximum. When CPU usage stays below 50%, one Pod is removed roughly every 300s until the minimum is reached. For load testing, use info.php, which ships inside the image, to drive up system load and then watch the status, for example http://ip.xx.xx.xx/info.php?id=1000000. The id parameter is the size of the computed result set: the larger the id, the more memory and CPU it consumes, and an extremely large value can easily hang the machine.
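To watch the autoscaler react, generate sustained load against info.php in one terminal and observe the HPA in another. A rough sketch, with `<elastic-public-IP>` standing in for the public IP that reaches the ingress controller:

```shell
# Terminal 1: request info.php in a loop to push CPU usage above the 50% target
while true; do
    curl -s -o /dev/null "http://<elastic-public-IP>/info.php?id=100000"
done

# Terminal 2: watch REPLICAS climb toward maxReplicas, then drop back to the
# minimum a few minutes after the load loop is stopped
kubectl get hpa myweb -w
```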
## Elastic cloud in practice

#### Elastic cloud architecture

![](/media/202204/Snip20220415_2_1649986080.png)

#### Deployment plan

###### 01. Cluster configuration

| Hostname  | IP address    | Notes                                   |
| --------- | ------------- | --------------------------------------- |
| master    | 192.168.1.21  | K8S management node                     |
| node-0001 | 192.168.1.31  | K8S worker node                         |
| node-0002 | 192.168.1.32  | K8S worker node                         |
| node-0003 | 192.168.1.33  | K8S worker node                         |
| node-0004 | 192.168.1.34  | K8S worker node, added when scaling out |
| node-0005 | 192.168.1.35  | K8S worker node, added when scaling out |
| registry  | 192.168.1.100 | K8S image registry                      |

###### 02. Resource files

```yaml
[root@master ~]# mkdir webapp
[root@master ~]# cd webapp/
[root@master webapp]# vim 01-pv.yaml
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-nfs
spec:
  volumeMode: Filesystem
  capacity:
    storage: 30Gi
  accessModes:
  - ReadWriteOnce
  - ReadOnlyMany
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.100
    path: /var/webroot
[root@master webapp]# vim 02-pvc.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-nfs
spec:
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 25Gi
[root@master webapp]# docker run -itd --name myphp 192.168.1.100:5000/myos:php-fpm
f5147647288fcf87f308a726fade34c2bf2aa635e2b9c3f7ae867341af22e0a6
[root@master webapp]# docker cp myphp:/etc/php-fpm.d/www.conf /var/webconf/
[root@master webapp]# docker rm -f myphp
myphp
[root@master webapp]# vim /var/webconf/www.conf
12: listen = 0.0.0.0:9000
24: ; listen.allowed_clients = 127.0.0.1
[root@master webapp]# kubectl create configmap php-conf --from-file=/var/webconf/www.conf
configmap/php-conf created
[root@master webapp]# vim 04-phpdeploy.yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: php-deploy
spec:
  selector:
    matchLabels:
      app: myphp
  replicas: 1
  template:
    metadata:
      labels:
        app: myphp
    spec:
      volumes:
      - name: php-conf
        configMap:
          name: php-conf
      - name: website
        persistentVolumeClaim:
          claimName: pvc-nfs
      containers:
      - name: php-fpm
        image: 192.168.1.100:5000/myos:php-fpm
        volumeMounts:
        - name: php-conf
          subPath: www.conf
          mountPath: /etc/php-fpm.d/www.conf
        - name: website
          mountPath: /usr/local/nginx/html
        ports:
        - protocol: TCP
          containerPort: 9000
        resources:
          requests:
            cpu: 200m
      restartPolicy: Always
[root@master webapp]# vim 05-phphpa.yaml
---
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v1
metadata:
  name: myphp
spec:
  minReplicas: 1
  maxReplicas: 5
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: php-deploy
  targetCPUUtilizationPercentage: 50
[root@master webapp]# vim 06-phpservice.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: phpbackend
spec:
  ports:
  - protocol: TCP
    port: 9000
    targetPort: 9000
  selector:
    app: myphp
  type: ClusterIP
[root@master webapp]# docker run -itd --name mynginx 192.168.1.100:5000/myos:nginx
a8427c4972601eb85eb188a0f2a96d6d1d5b87947afdfced762bae37151188e5
[root@master webapp]# docker cp mynginx:/usr/local/nginx/conf/nginx.conf /var/webconf/
[root@master webapp]# docker rm -f mynginx
mynginx
[root@master webapp]# vim /var/webconf/nginx.conf
... ...
    location ~ \.php$ {
        root            html;
        fastcgi_pass    phpbackend:9000;
        fastcgi_index   index.php;
        include         fastcgi.conf;
    }
... ...
[root@master webapp]# kubectl create configmap nginx-conf --from-file=/var/webconf/nginx.conf
configmap/nginx-conf created
[root@master webapp]# vim 08-nginxdeploy.yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: webcluster
spec:
  selector:
    matchLabels:
      app: mynginx
  replicas: 3
  template:
    metadata:
      labels:
        app: mynginx
    spec:
      volumes:
      - name: nginx-php
        configMap:
          name: nginx-conf
      - name: log-data
        hostPath:
          path: /var/log/weblog
          type: DirectoryOrCreate
      - name: website
        persistentVolumeClaim:
          claimName: pvc-nfs
      containers:
      - name: nginx
        image: 192.168.1.100:5000/myos:nginx
        volumeMounts:
        - name: nginx-php
          subPath: nginx.conf
          mountPath: /usr/local/nginx/conf/nginx.conf
        - name: log-data
          mountPath: /usr/local/nginx/logs
        - name: website
          mountPath: /usr/local/nginx/html
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always
[root@master webapp]# vim 09-nginxservice.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: webforeground
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    app: mynginx
  type: ClusterIP
[root@master webapp]# vim 10-ingress.yaml
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: webcluster
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  backend:
    serviceName: webforeground
    servicePort: 80
[root@master webapp]#
```

See the course notes for the complete webcluster.yaml resource file.
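As a final check, the numbered manifests can be applied in one pass and the result inspected from the master. A rough sketch, assuming the current directory is the webapp directory created above, the ingress controller from the earlier stages is running, and a test page such as info.php has been copied into the NFS web root /var/webroot:

```shell
# Apply every manifest in the webapp directory
kubectl apply -f .

# The PVC should bind to pv-nfs; the php-fpm and nginx deployments, the HPA,
# both services and the ingress should all be created
kubectl get pv,pvc
kubectl get deployment,hpa,service,ingress
kubectl get pod -l 'app in (myphp,mynginx)'

# A PHP page served through nginx and php-fpm confirms the full chain works
curl "http://<elastic-public-IP>/info.php?id=10"
```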