文章目录 一、创建企业微信机器人二、配置AlterManager告警发送至企业微信三、Prometheus接入AlterManager配置四、部署Prometheus+AlterManager(放到一个Pod中)五、测试告警 注意:请基于
PrometheusGrafana监控K8S集群(基于K8S环境部署)文章之上做本次实验。 一、创… 文章目录 一、创建企业微信机器人二、配置AlterManager告警发送至企业微信三、Prometheus接入AlterManager配置四、部署PrometheusAlterManager(放到一个Pod中)五、测试告警 注意请基于
PrometheusGrafana监控K8S集群(基于K8S环境部署)文章之上做本次实验。 一、创建企业微信机器人
1、创建企业微信机器人
点击登入企业微信网页版:
应用管理 机器人 创建应用 创建好之后如上图我们获取 点击查看获取 Secret 值。
2、获取企业ID 二、配置AlterManager告警发送至企业微信
1、创建AlterManager ConfigMap资源清单
vim alertmanager-cm.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: alertmanager
  namespace: prometheus
data:
  alertmanager.yml: |-
    templates:
    - '/alertmanager/template/WeChat.tmpl'
    global:
      resolve_timeout: 1m
      smtp_smarthost: 'smtp.163.com:25'
      smtp_from: '18145536045@163.com'
      smtp_auth_username: '18145536045@163.com'
      smtp_auth_password: 'KCGZFUDCCKMNZMKB'
      smtp_require_tls: false
    route:
      group_by: [alertname]
      group_wait: 10s
      group_interval: 10s
      repeat_interval: 10m
      receiver: wechat-001
    receivers:
    - name: wechat-001
      wechat_configs:
      - corp_id: wwfb8d55841e190c10        # 企业ID
        to_user: '@all'                    # 发送所有人
        agent_id: 1000002                  # agentID
        api_secret: wa6kWECFthSpvdhcF-RPgjrIBzUvm-SpqXXXXXXXXXX  # secret
执行YAML资源清单
kubectl apply -f alertmanager-cm.yaml三、Prometheus接入AlterManager配置
1、创建新的Prometheus ConfigMap资源清单添加监控K8S集群告警规则
vim prometheus-alertmanager-cfg.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    app: prometheus
  name: prometheus-config
  namespace: prometheus
data:
  prometheus.yml: |
    rule_files:
    - /etc/prometheus/rules.yml   # 告警规则位置
    alerting:
      alertmanagers:
      - static_configs:
        - targets: ["localhost:9093"]   # 接入AlterManager
    global:
      scrape_interval: 15s
      scrape_timeout: 10s
      evaluation_interval: 1m
    scrape_configs:
    - job_name: kubernetes-node
      kubernetes_sd_configs:
      - role: node
      relabel_configs:
      - source_labels: [__address__]
        regex: '(.*):10250'
        replacement: '${1}:9100'
        target_label: __address__
        action: replace
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
    - job_name: kubernetes-node-cadvisor
      kubernetes_sd_configs:
      - role: node
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
    - job_name: kubernetes-apiserver
      kubernetes_sd_configs:
      - role: endpoints
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: default;kubernetes;https
    - job_name: kubernetes-service-endpoints
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name
    - job_name: kubernetes-pods   # 监控Pod配置,添加注解后才可以被发现
      kubernetes_sd_configs:
      - role: pod
      relabel_configs:
      - action: keep
        regex: true
        source_labels:
        - __meta_kubernetes_pod_annotation_prometheus_io_scrape
      - action: replace
        regex: (.+)
        source_labels:
        - __meta_kubernetes_pod_annotation_prometheus_io_path
        target_label: __metrics_path__
      - action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        source_labels:
        - __address__
        - __meta_kubernetes_pod_annotation_prometheus_io_port
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - action: replace
        source_labels:
        - __meta_kubernetes_namespace
        target_label: kubernetes_namespace
      - action: replace
        source_labels:
        - __meta_kubernetes_pod_name
        target_label: kubernetes_pod_name
    - job_name: kubernetes-etcd   # 监控etcd配置
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/k8s-certs/etcd/ca.crt
        cert_file: /var/run/secrets/kubernetes.io/k8s-certs/etcd/server.crt
        key_file: /var/run/secrets/kubernetes.io/k8s-certs/etcd/server.key
      scrape_interval: 5s
      static_configs:
      - targets: ['16.32.15.200:2379']
  rules.yml: |   # K8S集群告警规则配置文件
    groups:
    - name: example
      rules:
      - alert: apiserver的cpu使用率大于80%
        expr: rate(process_cpu_seconds_total{job=~"kubernetes-apiserver"}[1m]) * 100 > 80
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}组件的cpu使用率超过80%"
      - alert: apiserver的cpu使用率大于90%
        expr: rate(process_cpu_seconds_total{job=~"kubernetes-apiserver"}[1m]) * 100 > 90
        for: 2s
        labels:
          severity: critical
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}组件的cpu使用率超过90%"
      - alert: etcd的cpu使用率大于80%
        expr: rate(process_cpu_seconds_total{job=~"kubernetes-etcd"}[1m]) * 100 > 80
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}组件的cpu使用率超过80%"
      - alert: etcd的cpu使用率大于90%
        expr: rate(process_cpu_seconds_total{job=~"kubernetes-etcd"}[1m]) * 100 > 90
        for: 2s
        labels:
          severity: critical
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}组件的cpu使用率超过90%"
      - alert: kube-state-metrics的cpu使用率大于80%
        expr: rate(process_cpu_seconds_total{k8s_app=~"kube-state-metrics"}[1m]) * 100 > 80
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "{{$labels.instance}}的{{$labels.k8s_app}}组件的cpu使用率超过80%"
          value: "{{ $value }}%"
          threshold: "80%"
      - alert: kube-state-metrics的cpu使用率大于90%
        # 注意:原文阈值为0(疑为便于测试触发),正式环境应改为90
        expr: rate(process_cpu_seconds_total{k8s_app=~"kube-state-metrics"}[1m]) * 100 > 0
        for: 2s
        labels:
          severity: critical
        annotations:
          description: "{{$labels.instance}}的{{$labels.k8s_app}}组件的cpu使用率超过90%"
          value: "{{ $value }}%"
          threshold: "90%"
      - alert: coredns的cpu使用率大于80%
        expr: rate(process_cpu_seconds_total{k8s_app=~"kube-dns"}[1m]) * 100 > 80
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "{{$labels.instance}}的{{$labels.k8s_app}}组件的cpu使用率超过80%"
          value: "{{ $value }}%"
          threshold: "80%"
      - alert: coredns的cpu使用率大于90%
        expr: rate(process_cpu_seconds_total{k8s_app=~"kube-dns"}[1m]) * 100 > 90
        for: 2s
        labels:
          severity: critical
        annotations:
          description: "{{$labels.instance}}的{{$labels.k8s_app}}组件的cpu使用率超过90%"
          value: "{{ $value }}%"
          threshold: "90%"
      - alert: kube-proxy打开句柄数>600
        expr: process_open_fds{job=~"kubernetes-kube-proxy"} > 600
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}打开句柄数>600"
          value: "{{ $value }}"
      - alert: kube-proxy打开句柄数>1000
        expr: process_open_fds{job=~"kubernetes-kube-proxy"} > 1000
        for: 2s
        labels:
          severity: critical
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}打开句柄数>1000"
          value: "{{ $value }}"
      - alert: kubernetes-schedule打开句柄数>600
        expr: process_open_fds{job=~"kubernetes-schedule"} > 600
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}打开句柄数>600"
          value: "{{ $value }}"
      - alert: kubernetes-schedule打开句柄数>1000
        expr: process_open_fds{job=~"kubernetes-schedule"} > 1000
        for: 2s
        labels:
          severity: critical
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}打开句柄数>1000"
          value: "{{ $value }}"
      - alert: kubernetes-controller-manager打开句柄数>600
        expr: process_open_fds{job=~"kubernetes-controller-manager"} > 600
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}打开句柄数>600"
          value: "{{ $value }}"
      - alert: kubernetes-controller-manager打开句柄数>1000
        expr: process_open_fds{job=~"kubernetes-controller-manager"} > 1000
        for: 2s
        labels:
          severity: critical
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}打开句柄数>1000"
          value: "{{ $value }}"
      - alert: kubernetes-apiserver打开句柄数>600
        expr: process_open_fds{job=~"kubernetes-apiserver"} > 600
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}打开句柄数>600"
          value: "{{ $value }}"
      - alert: kubernetes-apiserver打开句柄数>1000
        expr: process_open_fds{job=~"kubernetes-apiserver"} > 1000
        for: 2s
        labels:
          severity: critical
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}打开句柄数>1000"
          value: "{{ $value }}"
      - alert: kubernetes-etcd打开句柄数>600
        expr: process_open_fds{job=~"kubernetes-etcd"} > 600
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}打开句柄数>600"
          value: "{{ $value }}"
      - alert: kubernetes-etcd打开句柄数>1000
        expr: process_open_fds{job=~"kubernetes-etcd"} > 1000
        for: 2s
        labels:
          severity: critical
        annotations:
          description: "{{$labels.instance}}的{{$labels.job}}打开句柄数>1000"
          value: "{{ $value }}"
      - alert: coredns
        expr: process_open_fds{k8s_app=~"kube-dns"} > 600
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "插件{{$labels.k8s_app}}({{$labels.instance}}): 打开句柄数超过600"
          value: "{{ $value }}"
      - alert: coredns
        expr: process_open_fds{k8s_app=~"kube-dns"} > 1000
        for: 2s
        labels:
          severity: critical
        annotations:
          description: "插件{{$labels.k8s_app}}({{$labels.instance}}): 打开句柄数超过1000"
          value: "{{ $value }}"
      - alert: kube-proxy
        expr: process_virtual_memory_bytes{job=~"kubernetes-kube-proxy"} > 2000000000
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "组件{{$labels.job}}({{$labels.instance}}): 使用虚拟内存超过2G"
          value: "{{ $value }}"
      - alert: scheduler
        expr: process_virtual_memory_bytes{job=~"kubernetes-schedule"} > 2000000000
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "组件{{$labels.job}}({{$labels.instance}}): 使用虚拟内存超过2G"
          value: "{{ $value }}"
      - alert: kubernetes-controller-manager
        expr: process_virtual_memory_bytes{job=~"kubernetes-controller-manager"} > 2000000000
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "组件{{$labels.job}}({{$labels.instance}}): 使用虚拟内存超过2G"
          value: "{{ $value }}"
      - alert: kubernetes-apiserver
        expr: process_virtual_memory_bytes{job=~"kubernetes-apiserver"} > 2000000000
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "组件{{$labels.job}}({{$labels.instance}}): 使用虚拟内存超过2G"
          value: "{{ $value }}"
      - alert: kubernetes-etcd
        expr: process_virtual_memory_bytes{job=~"kubernetes-etcd"} > 2000000000
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "组件{{$labels.job}}({{$labels.instance}}): 使用虚拟内存超过2G"
          value: "{{ $value }}"
      - alert: kube-dns
        expr: process_virtual_memory_bytes{k8s_app=~"kube-dns"} > 2000000000
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "插件{{$labels.k8s_app}}({{$labels.instance}}): 使用虚拟内存超过2G"
          value: "{{ $value }}"
      - alert: HttpRequestsAvg
        expr: sum(rate(rest_client_requests_total{job=~"kubernetes-kube-proxy|kubernetes-kubelet|kubernetes-schedule|kubernetes-control-manager|kubernetes-apiservers"}[1m])) > 1000
        for: 2s
        labels:
          team: admin
        annotations:
          description: "组件{{$labels.job}}({{$labels.instance}}): TPS超过1000"
          value: "{{ $value }}"
          threshold: "1000"
      - alert: Pod_restarts
        expr: kube_pod_container_status_restarts_total{namespace=~"kube-system|default|monitor-sa"} > 0
        for: 2s
        labels:
          severity: warnning
        annotations:
          description: "在{{$labels.namespace}}名称空间下发现{{$labels.pod}}这个pod下的容器{{$labels.container}}被重启,这个监控指标是由{{$labels.instance}}采集的"
          value: "{{ $value }}"
          threshold: "0"
      - alert: Pod_waiting
        expr: kube_pod_container_status_waiting_reason{namespace=~"kube-system|default"} == 1
        for: 2s
        labels:
          team: admin
        annotations:
          description: "空间{{$labels.namespace}}({{$labels.instance}}): 发现{{$labels.pod}}下的{{$labels.container}}启动异常等待中"
          value: "{{ $value }}"
          threshold: "1"
      - alert: Pod_terminated
        expr: kube_pod_container_status_terminated_reason{namespace=~"kube-system|default|monitor-sa"} == 1
        for: 2s
        labels:
          team: admin
        annotations:
          description: "空间{{$labels.namespace}}({{$labels.instance}}): 发现{{$labels.pod}}下的{{$labels.container}}被删除"
          value: "{{ $value }}"
          threshold: "1"
      - alert: Etcd_leader
        expr: etcd_server_has_leader{job="kubernetes-etcd"} == 0
        for: 2s
        labels:
          team: admin
        annotations:
          description: "组件{{$labels.job}}({{$labels.instance}}): 当前没有leader"
          value: "{{ $value }}"
          threshold: "0"
      - alert: Etcd_leader_changes
        expr: rate(etcd_server_leader_changes_seen_total{job="kubernetes-etcd"}[1m]) > 0
        for: 2s
        labels:
          team: admin
        annotations:
          description: "组件{{$labels.job}}({{$labels.instance}}): 当前leader已发生改变"
          value: "{{ $value }}"
          threshold: "0"
      - alert: Etcd_failed
        expr: rate(etcd_server_proposals_failed_total{job="kubernetes-etcd"}[1m]) > 0
        for: 2s
        labels:
          team: admin
        annotations:
          description: "组件{{$labels.job}}({{$labels.instance}}): 服务失败"
          value: "{{ $value }}"
          threshold: "0"
      - alert: Etcd_db_total_size
        expr: etcd_debugging_mvcc_db_total_size_in_bytes{job="kubernetes-etcd"} > 10000000000
        for: 2s
        labels:
          team: admin
        annotations:
          description: "组件{{$labels.job}}({{$labels.instance}})db空间超过10G"
          value: "{{ $value }}"
          threshold: "10G"
      - alert: Endpoint_ready
        expr: kube_endpoint_address_not_ready{namespace=~"kube-system|default"} == 1
        for: 2s
        labels:
          team: admin
        annotations:
          description: "空间{{$labels.namespace}}({{$labels.instance}}): 发现{{$labels.endpoint}}不可用"
          value: "{{ $value }}"
          threshold: "1"
    - name: 物理节点状态-监控告警
      rules:
      - alert: 物理节点cpu使用率
        expr: 100 - avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by(instance) * 100 > 90
        for: 2s
        labels:
          severity: critical
        annotations:
          summary: "{{ $labels.instance }}cpu使用率过高"
          description: "{{ $labels.instance }}的cpu使用率超过90%,当前使用率[{{ $value }}],需要排查处理"
      - alert: 物理节点内存使用率
        expr: (node_memory_MemTotal_bytes - (node_memory_MemFree_bytes + node_memory_Buffers_bytes + node_memory_Cached_bytes)) / node_memory_MemTotal_bytes * 100 > 90
        for: 2s
        labels:
          severity: critical
        annotations:
          summary: "{{ $labels.instance }}内存使用率过高"
          description: "{{ $labels.instance }}的内存使用率超过90%,当前使用率[{{ $value }}],需要排查处理"
      - alert: InstanceDown
        expr: up == 0
        for: 2s
        labels:
          severity: critical
        annotations:
          summary: "{{ $labels.instance }}: 服务器宕机"
          description: "{{ $labels.instance }}: 服务器延时超过2分钟"
      - alert: 物理节点磁盘的IO性能
        # 注意:比较运算符在原文中丢失,此处按常见版本补为 < 60,请按需核对
        expr: 100 - (avg(irate(node_disk_io_time_seconds_total[1m])) by(instance) * 100) < 60
        for: 2s
        labels:
          severity: critical
        annotations:
          summary: "{{$labels.mountpoint}} 流入磁盘IO使用率过高"
          description: "{{$labels.mountpoint }} 流入磁盘IO大于60%(目前使用:{{$value}})"
      - alert: 入网流量带宽
        expr: ((sum(rate (node_network_receive_bytes_total{device!~'tap.*|veth.*|br.*|docker.*|virbr*|lo*'}[5m])) by (instance)) / 100) > 102400
        for: 2s
        labels:
          severity: critical
        annotations:
          summary: "{{$labels.mountpoint}} 流入网络带宽过高"
          description: "{{$labels.mountpoint }}流入网络带宽持续5分钟高于100M. RX带宽使用率{{$value}}"
      - alert: 出网流量带宽
        expr: ((sum(rate (node_network_transmit_bytes_total{device!~'tap.*|veth.*|br.*|docker.*|virbr*|lo*'}[5m])) by (instance)) / 100) > 102400
        for: 2s
        labels:
          severity: critical
        annotations:
          summary: "{{$labels.mountpoint}} 流出网络带宽过高"
          description: "{{$labels.mountpoint }}流出网络带宽持续5分钟高于100M. RX带宽使用率{{$value}}"
      - alert: TCP会话
        expr: node_netstat_Tcp_CurrEstab > 1000
        for: 2s
        labels:
          severity: critical
        annotations:
          summary: "{{$labels.mountpoint}} TCP_ESTABLISHED过高"
          description: "{{$labels.mountpoint }} TCP_ESTABLISHED大于1000%(目前使用:{{$value}}%)"
      - alert: 磁盘容量
        expr: 100 - (node_filesystem_free_bytes{fstype=~"ext4|xfs"}/node_filesystem_size_bytes{fstype=~"ext4|xfs"}*100) > 80
        for: 2s
        labels:
          severity: critical
        annotations:
          summary: "{{$labels.mountpoint}} 磁盘分区使用率过高"
          description: "{{$labels.mountpoint }} 磁盘分区使用大于80%(目前使用:{{$value}}%)"
执行资源清单
kubectl apply -f prometheus-alertmanager-cfg.yaml2、由于在prometheus中新增了etcd所以生成一个etcd-certs这个在部署prometheus需要
kubectl -n prometheus create secret generic etcd-certs --from-file=/etc/kubernetes/pki/etcd/server.key --from-file=/etc/kubernetes/pki/etcd/server.crt --from-file=/etc/kubernetes/pki/etcd/ca.crt

四、部署Prometheus+AlterManager(放到一个Pod中)
1、在node-1节点创建/data/alertmanager目录存放alertmanager数据
mkdir /data/alertmanager/template -p
chmod -R 777 /data/alertmanager2、在node-1节点创建WeChat报警模板
vim /data/alertmanager/template/WeChat.tmpl

{{ define "wechat.default.message" }}
{{- if gt (len .Alerts.Firing) 0 -}}
{{- range $index, $alert := .Alerts -}}
{{- if eq $index 0 }}
xxx环境监控报警
告警状态{{ .Status }}
告警级别{{ .Labels.severity }}
告警类型{{ $alert.Labels.alertname }}
故障主机: {{ $alert.Labels.instance }} {{ $alert.Labels.pod }}
告警主题: {{ $alert.Annotations.summary }}
告警详情: {{ $alert.Annotations.message }}{{ $alert.Annotations.description}};
触发阀值{{ .Annotations.value }}
故障时间: {{ ($alert.StartsAt.Add 28800e9).Format "2006-01-02 15:04:05" }}
end
{{- end }}
{{- end }}
{{- end }}
{{- if gt (len .Alerts.Resolved) 0 -}}
{{- range $index, $alert := .Alerts -}}
{{- if eq $index 0 }}
xxx环境异常恢复
告警类型{{ .Labels.alertname }}
告警状态{{ .Status }}
告警主题: {{ $alert.Annotations.summary }}
告警详情: {{ $alert.Annotations.message }}{{ $alert.Annotations.description}};
故障时间: {{ ($alert.StartsAt.Add 28800e9).Format "2006-01-02 15:04:05" }}
恢复时间: {{ ($alert.EndsAt.Add 28800e9).Format "2006-01-02 15:04:05" }}
{{- if gt (len $alert.Labels.instance) 0 }}
实例信息: {{ $alert.Labels.instance }}
{{- end }}
end
{{- end }}
{{- end }}
{{- end }}
{{- end }}3、删除旧的prometheus deployment资源
kubectl delete deploy prometheus-server -n prometheus4、创建deployment资源
vim prometheus-alertmanager-deploy.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-server
  namespace: prometheus
  labels:
    app: prometheus
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
      component: server
    #matchExpressions:
    #- {key: app, operator: In, values: [prometheus]}
    #- {key: component, operator: In, values: [server]}
  template:
    metadata:
      labels:
        app: prometheus
        component: server
      annotations:
        prometheus.io/scrape: 'false'
    spec:
      nodeName: node-1                   # 调度到node-1节点
      serviceAccountName: prometheus     # 指定sa服务账号
      containers:
      - name: prometheus
        image: prom/prometheus:v2.33.5
        imagePullPolicy: IfNotPresent
        command:
        - /bin/prometheus
        args:
        - --config.file=/etc/prometheus/prometheus.yml
        - --storage.tsdb.path=/prometheus
        - --storage.tsdb.retention=24h
        - --web.enable-lifecycle
        ports:
        - containerPort: 9090
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/prometheus
          name: prometheus-config
        - mountPath: /prometheus/
          name: prometheus-storage-volume
        - name: k8s-certs
          mountPath: /var/run/secrets/kubernetes.io/k8s-certs/etcd/
      - name: alertmanager
        #image: prom/alertmanager:v0.14.0
        image: prom/alertmanager:v0.23.0
        imagePullPolicy: IfNotPresent
        args:
        - --config.file=/etc/alertmanager/alertmanager.yml
        - --log.level=debug
        ports:
        - containerPort: 9093
          protocol: TCP
          name: alertmanager
        volumeMounts:
        - name: alertmanager-config
          mountPath: /etc/alertmanager
        - name: alertmanager-storage
          mountPath: /alertmanager
        - name: localtime
          mountPath: /etc/localtime
      volumes:
      - name: prometheus-config
        configMap:
          name: prometheus-config
      - name: prometheus-storage-volume
        hostPath:
          path: /data
          type: Directory
      - name: k8s-certs
        secret:
          secretName: etcd-certs
      - name: alertmanager-config
        configMap:
          name: alertmanager
      - name: alertmanager-storage
        hostPath:
          path: /data/alertmanager
          type: DirectoryOrCreate
      - name: localtime
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
执行YAML资源清单
kubectl apply -f prometheus-alertmanager-deploy.yaml查看状态
kubectl get pods -n prometheus5、创建AlterManager SVC资源
vim alertmanager-svc.yaml
---
apiVersion: v1
kind: Service
metadata:
  labels:
    name: prometheus
    kubernetes.io/cluster-service: 'true'
  name: alertmanager
  namespace: prometheus
spec:
  ports:
  - name: alertmanager
    nodePort: 30066
    port: 9093
    protocol: TCP
    targetPort: 9093
  selector:
    app: prometheus
  sessionAffinity: None
  type: NodePort
执行YAML资源清单
kubectl apply -f alertmanager-svc.yaml 查看状态
kubectl get svc -n prometheus五、测试告警
浏览器访问http://IP:30066 如上图可以看到Prometheus的告警信息已经发到AlterManager了AlertManager收到报警数据后会将警报信息进行分组然后根据AlertManager配置的 group_wait 时间先进行等待。等wait时间过后再发送报警信息至企业微信 如上图告警信息已经成功发往企业微信了