diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/README.md b/charts/k8s-monitoring/charts/feature-cluster-metrics/README.md index 1117c2ae9..030aa73fa 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/README.md +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/README.md @@ -119,6 +119,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 |-----|------|---------|-------------| | apiServer.enabled | bool | `false` | Scrape metrics from the API Server. | | apiServer.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for the API Server. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| apiServer.jobLabel | string | `"integrations/kubernetes/kube-apiserver"` | The value for the job label. | | apiServer.maxCacheSize | string | `nil` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides metrics.maxCacheSize | | apiServer.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | | apiServer.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. An empty list means keep all. | @@ -130,6 +131,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 |-----|------|---------|-------------| | cadvisor.enabled | bool | `true` | Scrape metrics from cAdvisor. | | cadvisor.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for cAdvisor metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| cadvisor.jobLabel | string | `"integrations/kubernetes/cadvisor"` | The value for the job label. | | cadvisor.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | | cadvisor.metricsTuning.dropEmptyContainerLabels | bool | `true` | Drop metrics that have an empty container label | | cadvisor.metricsTuning.dropEmptyImageLabels | bool | `true` | Drop metrics that have an empty image label | @@ -176,6 +178,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 | kepler.enabled | bool | `false` | Deploy and scrape Kepler metrics. | | kepler.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for Kepler. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with __ (i.e. __meta_kubernetes*) are dropped. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) | | kepler.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for Kepler. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no __meta* labels are present. | +| kepler.jobLabel | string | `"integrations/kepler"` | The value for the job label. | | kepler.labelMatchers | object | `{"app.kubernetes.io/name":"kepler"}` | Label matchers used to select the Kepler pods | | kepler.maxCacheSize | string | `100000` | Sets the max_cache_size for the prometheus.relabel component for Kepler. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | | kepler.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | @@ -192,6 +195,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 | kube-state-metrics.enabled | bool | `true` | Scrape metrics from kube-state-metrics. | | kube-state-metrics.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for kube-state-metrics. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with __ (i.e. __meta_kubernetes*) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) | | kube-state-metrics.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for kube-state-metrics metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| kube-state-metrics.jobLabel | string | `"integrations/kubernetes/kube-state-metrics"` | The value for the job label. | | kube-state-metrics.labelMatchers | object | `{"app.kubernetes.io/name":"kube-state-metrics"}` | Labels used to select the kube-state-metrics service. | | kube-state-metrics.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | | kube-state-metrics.metricLabelsAllowlist | list | `["nodes=[agentpool,alpha.eksctl.io/cluster-name,alpha.eksctl.io/nodegroup-name,beta.kubernetes.io/instance-type,cloud.google.com/gke-nodepool,cluster_name,ec2_amazonaws_com_Name,ec2_amazonaws_com_aws_autoscaling_groupName,ec2_amazonaws_com_aws_autoscaling_group_name,ec2_amazonaws_com_name,eks_amazonaws_com_nodegroup,k8s_io_cloud_provider_aws,karpenter.sh/nodepool,kubernetes.azure.com/cluster,kubernetes.io/arch,kubernetes.io/hostname,kubernetes.io/os,node.kubernetes.io/instance-type,topology.kubernetes.io/region,topology.kubernetes.io/zone]"]` | `kube__labels` metrics to generate. The default is to include a useful set for Node labels. 
| @@ -210,6 +214,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 | kubeControllerManager.enabled | bool | `false` | Scrape metrics from the Kube Controller Manager | | kubeControllerManager.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for the Kube Controller Manager. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) | | kubeControllerManager.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for the Kube Controller Manager. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| kubeControllerManager.jobLabel | string | `"kube-controller-manager"` | The value for the job label. | | kubeControllerManager.maxCacheSize | string | `nil` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides metrics.maxCacheSize | | kubeControllerManager.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | | kubeControllerManager.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. An empty list means keep all. | @@ -222,6 +227,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 |-----|------|---------|-------------| | kubeDNS.enabled | bool | `false` | Scrape metrics from KubeDNS | | kubeDNS.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for KubeDNS. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| kubeDNS.jobLabel | string | `"integrations/kubernetes/kube-dns"` | The value for the job label. | | kubeDNS.maxCacheSize | string | `nil` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides metrics.maxCacheSize | | kubeDNS.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | | kubeDNS.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. An empty list means keep all. | @@ -234,6 +240,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 | kubeProxy.enabled | bool | `false` | Scrape metrics from the Kube Proxy | | kubeProxy.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for the Kube Proxy. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) | | kubeProxy.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for the Kube Proxy. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| kubeProxy.jobLabel | string | `"integrations/kubernetes/kube-proxy"` | The value for the job label. | | kubeProxy.maxCacheSize | string | `nil` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides metrics.maxCacheSize | | kubeProxy.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | | kubeProxy.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. An empty list means keep all. | @@ -247,6 +254,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 | kubeScheduler.enabled | bool | `false` | Scrape metrics from the Kube Scheduler | | kubeScheduler.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for the Kube Scheduler. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) | | kubeScheduler.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for the Kube Scheduler. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| kubeScheduler.jobLabel | string | `"kube-scheduler"` | The value for the job label. | | kubeScheduler.maxCacheSize | string | `nil` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides metrics.maxCacheSize | | kubeScheduler.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | | kubeScheduler.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. An empty list means keep all. | @@ -259,6 +267,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 |-----|------|---------|-------------| | kubelet.enabled | bool | `true` | Scrape metrics from kubelet. | | kubelet.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for Kubelet metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| kubelet.jobLabel | string | `"integrations/kubernetes/kubelet"` | The value for the job label. | | kubelet.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | | kubelet.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | | kubelet.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. | @@ -271,6 +280,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 |-----|------|---------|-------------| | kubeletResource.enabled | bool | `true` | Scrape resource metrics from kubelet. | | kubeletResource.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for Kubelet Resources metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| kubeletResource.jobLabel | string | `"integrations/kubernetes/resources"` | The value for the job label. | | kubeletResource.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | | kubeletResource.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | | kubeletResource.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. | @@ -286,6 +296,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 | node-exporter.enabled | bool | `true` | Scrape metrics from Node Exporter. | | node-exporter.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for Node Exporter. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with __ (i.e. __meta_kubernetes*) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) | | node-exporter.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for Node Exporter metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| node-exporter.jobLabel | string | `"integrations/node_exporter"` | The value for the job label. | | node-exporter.labelMatchers | object | `{"app.kubernetes.io/name":"node-exporter"}` | Labels used to select the Node Exporter pods. | | node-exporter.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | | node-exporter.metricsTuning.dropMetricsForFilesystem | list | `["tempfs"]` | Drop metrics for the given filesystem types | @@ -305,6 +316,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 | opencost.enabled | bool | `false` | Deploy and scrape OpenCost. | | opencost.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for OpenCost. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with __ (i.e. __meta_kubernetes*) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) | | opencost.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for OpenCost. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no __meta* labels are present. | +| opencost.jobLabel | string | `"integrations/opencost"` | The value for the job label. | | opencost.labelMatchers | object | `{"app.kubernetes.io/name":"opencost"}` | Label matchers used to select the OpenCost service | | opencost.maxCacheSize | string | `100000` | Sets the max_cache_size for the prometheus.relabel component for OpenCost. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | | opencost.metricsSource | string | `""` | The name of the metric destination where OpenCost will query for required metrics. Setting this will enable guided setup for required OpenCost parameters. To skip guided setup, set this to "custom". | @@ -330,6 +342,7 @@ Be sure perform actual integration testing in a live environment in the main [k8 | windows-exporter.enabled | bool | `true` | Scrape node metrics | | windows-exporter.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for Windows Exporter. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with __ (i.e. __meta_kubernetes*) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) | | windows-exporter.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for Windows Exporter metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) | +| windows-exporter.jobLabel | string | `"integrations/windows-exporter"` | The value for the job label. | | windows-exporter.labelMatchers | object | `{"app.kubernetes.io/name":"windows-exporter"}` | Labels used to select the Windows Exporter pods. | | windows-exporter.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | | windows-exporter.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_api_server.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_api_server.alloy.tpl index 871806a21..b71eac154 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_api_server.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_api_server.alloy.tpl @@ -5,6 +5,7 @@ kubernetes.apiserver "scrape" { clustering = true + job_label = {{ .Values.apiServer.jobLabel | quote }} {{- if $metricAllowList }} keep_metrics = "up|{{ $metricAllowList | join "|" }}" {{- end }} diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_cadvisor.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_cadvisor.alloy.tpl index ab3eb0f58..d8f65175c 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_cadvisor.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_cadvisor.alloy.tpl @@ -16,6 +16,7 @@ kubernetes.cadvisor "scrape" { clustering = true + job_label = {{ .Values.cadvisor.jobLabel | quote }} {{- if $metricAllowList }} keep_metrics = {{ $metricAllowList | join "|" | quote }} {{- end }} diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kepler.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kepler.alloy.tpl index 074ee40e1..de5cd88da 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kepler.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kepler.alloy.tpl @@ -43,7 +43,7 @@ discovery.relabel "kepler" { prometheus.scrape "kepler" { targets = discovery.relabel.kepler.output - job_name = "integrations/kepler" + job_name = {{ .Values.kepler.jobLabel | quote }} honor_labels = true scrape_interval = {{ .Values.kepler.scrapeInterval | default .Values.global.scrapeInterval | quote }} clustering { diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_controller_manager.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_controller_manager.alloy.tpl index 7294468e1..ce83773c4 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_controller_manager.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_controller_manager.alloy.tpl @@ -28,7 +28,7 @@ discovery.relabel "kube_controller_manager" { prometheus.scrape "kube_controller_manager" { targets = discovery.relabel.kube_controller_manager.output - job_name = "kube-controller-manager" + job_name = {{ .Values.kubeControllerManager.jobLabel | quote }} scheme = "https" scrape_interval = {{ .Values.kubeControllerManager.scrapeInterval | default .Values.global.scrapeInterval | quote }} bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_dns.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_dns.alloy.tpl index f61be9615..6158311ea 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_dns.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_dns.alloy.tpl @@ -5,6 +5,7 @@ 
kubernetes.kube_dns "scrape" { clustering = true + job_label = {{ .Values.kubeDNS.jobLabel | quote }} {{- if $metricAllowList }} keep_metrics = "up|{{ $metricAllowList | join "|" }}" {{- end }} diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_proxy.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_proxy.alloy.tpl index d6755788d..1893e21cf 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_proxy.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_proxy.alloy.tpl @@ -28,7 +28,7 @@ discovery.relabel "kube_proxy" { prometheus.scrape "kube_proxy" { targets = discovery.relabel.kube_proxy.output - job_name = "integrations/kubernetes/kube-proxy" + job_name = {{ .Values.kubeProxy.jobLabel | quote }} scheme = "http" scrape_interval = {{ .Values.kubeProxy.scrapeInterval | default .Values.global.scrapeInterval | quote }} clustering { diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_scheduler.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_scheduler.alloy.tpl index 117303856..01f041f2c 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_scheduler.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_scheduler.alloy.tpl @@ -28,7 +28,7 @@ discovery.relabel "kube_scheduler" { prometheus.scrape "kube_scheduler" { targets = discovery.relabel.kube_scheduler.output - job_name = "kube-scheduler" + job_name = {{ .Values.kubeScheduler.jobLabel | quote }} scheme = "https" scrape_interval = {{ .Values.kubeScheduler.scrapeInterval | default .Values.global.scrapeInterval | quote }} bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_state_metrics.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_state_metrics.alloy.tpl index f1809af6a..64ab5f6f3 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_state_metrics.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kube_state_metrics.alloy.tpl @@ -44,6 +44,7 @@ discovery.relabel "kube_state_metrics" { kube_state_metrics.scrape "metrics" { targets = {{ $scrapeTargets }} clustering = true + job_label = {{ (index .Values "kube-state-metrics").jobLabel | quote }} {{- if $metricAllowList }} keep_metrics = {{ $metricAllowList | join "|" | quote }} {{- end }} diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kubelet.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kubelet.alloy.tpl index d1405184b..4378baf07 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kubelet.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kubelet.alloy.tpl @@ -16,6 +16,7 @@ kubernetes.kubelet "scrape" { clustering = true + job_label = {{ .Values.kubelet.jobLabel | quote }} {{- if $metricAllowList }} keep_metrics = {{ $metricAllowList | join "|" | quote }} {{- end }} diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kubelet_resource.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kubelet_resource.alloy.tpl index 413ee6139..b224e4a0e 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kubelet_resource.alloy.tpl +++ 
b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_kubelet_resource.alloy.tpl @@ -16,7 +16,7 @@ kubernetes.resources "scrape" { clustering = true - job_label = "integrations/kubernetes/resources" + job_label = {{ .Values.kubeletResource.jobLabel | quote }} {{- if $metricAllowList }} keep_metrics = {{ $metricAllowList | join "|" | quote }} {{- end }} diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_node_exporter.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_node_exporter.alloy.tpl index 8f7b77d65..da0ab0741 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_node_exporter.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_node_exporter.alloy.tpl @@ -49,7 +49,7 @@ discovery.relabel "node_exporter" { node_exporter.scrape "metrics" { targets = discovery.relabel.node_exporter.output - job_label = "integrations/node_exporter" + job_label = {{ (index .Values "node-exporter").jobLabel | quote }} clustering = true {{- if $metricAllowList }} keep_metrics = {{ $metricAllowList | join "|" | quote }} diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_opencost.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_opencost.alloy.tpl index b294b66ab..7a3093e79 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_opencost.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_opencost.alloy.tpl @@ -43,7 +43,7 @@ discovery.relabel "opencost" { prometheus.scrape "opencost" { targets = discovery.relabel.opencost.output - job_name = "integrations/opencost" + job_name = {{ .Values.opencost.jobLabel | quote }} honor_labels = true scrape_interval = {{ .Values.opencost.scrapeInterval | default .Values.global.scrapeInterval | quote }} clustering { diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_windows_exporter.alloy.tpl b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_windows_exporter.alloy.tpl index 4404a2627..a8eaa2dd9 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_windows_exporter.alloy.tpl +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/templates/_windows_exporter.alloy.tpl @@ -54,7 +54,7 @@ discovery.relabel "windows_exporter" { } prometheus.scrape "windows_exporter" { - job_name = "integrations/windows-exporter" + job_name = {{ (index .Values "windows-exporter").jobLabel | quote }} targets = discovery.relabel.windows_exporter.output scrape_interval = {{ (index .Values "windows-exporter").scrapeInterval | default .Values.global.scrapeInterval | quote }} clustering { diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/control_plane_test.yaml b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/control_plane_test.yaml index fa4ac1c27..6b6c58629 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/control_plane_test.yaml +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/control_plane_test.yaml @@ -28,6 +28,7 @@ tests: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -45,6 +46,7 @@ tests: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -126,6 +128,7 @@ tests: kubernetes.apiserver "scrape" { clustering = true + job_label = "integrations/kubernetes/kube-apiserver" scrape_interval = "60s" max_cache_size = 100000 forward_to = argument.metrics_destinations.value @@ -168,6 +171,7 @@ tests: kubernetes.kube_dns "scrape" { clustering = true + job_label = "integrations/kubernetes/kube-dns" scrape_interval = "60s" max_cache_size = 100000 forward_to = argument.metrics_destinations.value @@ -258,6 +262,7 @@ tests: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/custom_rules_test.yaml b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/custom_rules_test.yaml index 3405057b0..fb2ba860b 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/custom_rules_test.yaml +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/custom_rules_test.yaml @@ -44,6 +44,7 @@ tests: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -61,6 +62,7 @@ tests: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -168,6 +170,7 @@ tests: kube_state_metrics.scrape "metrics" { targets = discovery.relabel.kube_state_metrics.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/default_test.yaml b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/default_test.yaml index 9e9dc26bc..2ebd001ff 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/default_test.yaml +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/default_test.yaml @@ -26,6 +26,7 @@ tests: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -43,6 +44,7 @@ tests: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -141,6 +143,7 @@ tests: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/kepler_test.yaml b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/kepler_test.yaml index 5e2514cb8..e02a68135 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/kepler_test.yaml +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/kepler_test.yaml @@ -28,6 +28,7 @@ tests: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -45,6 +46,7 @@ tests: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -143,6 +145,7 @@ tests: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/metrics_tuning_test.yaml b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/metrics_tuning_test.yaml index 19e3bd6ff..782ea789e 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/metrics_tuning_test.yaml +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/metrics_tuning_test.yaml @@ -35,6 +35,7 @@ tests: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -52,6 +53,7 @@ tests: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" scrape_interval = "60s" max_cache_size = 100000 forward_to = [prometheus.relabel.cadvisor.receiver] @@ -149,6 +151,7 @@ tests: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" drop_metrics = "kube_replicaset.*" scheme = "http" diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/opencost_test.yaml b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/opencost_test.yaml index 1a60c6ed8..006eb0f5b 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/opencost_test.yaml +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/opencost_test.yaml @@ -28,6 +28,7 @@ tests: kubernetes.kubelet "scrape" { clustering = true + job_label 
= "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -45,6 +46,7 @@ tests: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -143,6 +145,7 @@ tests: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/openshift_test.yaml b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/openshift_test.yaml index df79c1a84..12c98b9da 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/openshift_test.yaml +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/tests/openshift_test.yaml @@ -42,6 +42,7 @@ tests: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -59,6 +60,7 @@ tests: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -156,6 +158,7 @@ tests: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "https" bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/values.schema.json b/charts/k8s-monitoring/charts/feature-cluster-metrics/values.schema.json index c4a7698da..756f80bf9 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/values.schema.json +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/values.schema.json @@ -14,6 +14,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "maxCacheSize": { "type": "null" }, @@ -42,6 +45,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "maxCacheSize": { "type": "null" }, @@ -213,6 +219,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "labelMatchers": { "type": "object", "properties": { @@ -277,6 +286,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "labelMatchers": { "type": "object", "properties": { @@ -367,6 +379,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "maxCacheSize": { "type": "null" }, @@ -401,6 +416,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "maxCacheSize": { "type": "null" }, @@ -435,6 +453,9 @@ 
"extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "maxCacheSize": { "type": "null" }, @@ -472,6 +493,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "maxCacheSize": { "type": "null" }, @@ -503,6 +527,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "maxCacheSize": { "type": "null" }, @@ -534,6 +561,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "maxCacheSize": { "type": "null" }, @@ -620,6 +650,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "labelMatchers": { "type": "object", "properties": { @@ -693,6 +726,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "labelMatchers": { "type": "object", "properties": { @@ -872,6 +908,9 @@ "extraMetricProcessingRules": { "type": "string" }, + "jobLabel": { + "type": "string" + }, "labelMatchers": { "type": "object", "properties": { diff --git a/charts/k8s-monitoring/charts/feature-cluster-metrics/values.yaml b/charts/k8s-monitoring/charts/feature-cluster-metrics/values.yaml index f2736c6a2..bd3d14297 100644 --- a/charts/k8s-monitoring/charts/feature-cluster-metrics/values.yaml +++ b/charts/k8s-monitoring/charts/feature-cluster-metrics/values.yaml @@ -47,6 +47,10 @@ kubelet: # @section -- Kubelet enabled: true + # -- The value for the job label. + # @section -- Kubelet + jobLabel: "integrations/kubernetes/kubelet" + # -- Rule blocks to be added to the prometheus.relabel component for Kubelet metrics. # These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. # ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) @@ -84,6 +88,10 @@ kubeletResource: # @section -- Kubelet Resources enabled: true + # -- The value for the job label. + # @section -- Kubelet Resources + jobLabel: "integrations/kubernetes/resources" + # -- Rule blocks to be added to the prometheus.relabel component for Kubelet Resources metrics. # These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. # ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) @@ -121,6 +129,10 @@ cadvisor: # @section -- cAdvisor enabled: true + # -- The value for the job label. + # @section -- cAdvisor + jobLabel: "integrations/kubernetes/cadvisor" + # -- Rule blocks to be added to the prometheus.relabel component for cAdvisor metrics. # These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. # ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) @@ -176,6 +188,10 @@ apiServer: # @section -- API Server enabled: + # -- The value for the job label. + # @section -- API Server + jobLabel: "integrations/kubernetes/kube-apiserver" + # -- How frequently to scrape metrics from the API Server # Overrides metrics.scrapeInterval # @default -- 60s @@ -212,6 +228,10 @@ kubeControllerManager: # @section -- Kube Controller Manager enabled: + # -- The value for the job label. 
+  # @section -- Kube Controller Manager
+  jobLabel: "kube-controller-manager"
+
   # -- Port number used by the Kube Controller Manager, set by `--secure-port.`
   # @section -- Kube Controller Manager
   port: 10257
@@ -259,6 +279,10 @@ kubeDNS:
   # @section -- KubeDNS
   enabled:
 
+  # -- The value for the job label.
+  # @section -- KubeDNS
+  jobLabel: "integrations/kubernetes/kube-dns"
+
   # -- How frequently to scrape metrics from KubeDNS
   # Overrides metrics.scrapeInterval
   # @default -- 60s
@@ -295,6 +319,10 @@ kubeProxy:
   # @section -- Kube Proxy
   enabled:
 
+  # -- The value for the job label.
+  # @section -- Kube Proxy
+  jobLabel: "integrations/kubernetes/kube-proxy"
+
   # -- Port number used by the Kube Proxy, set in `--metrics-bind-address`.
   # @section -- Kube Proxy
   port: 10249
@@ -342,6 +370,10 @@ kubeScheduler:
   # @section -- Kube Scheduler
   enabled:
 
+  # -- The value for the job label.
+  # @section -- Kube Scheduler
+  jobLabel: "kube-scheduler"
+
   # -- Port number used by the Kube Scheduler, set by `--secure-port`.
   # @section -- Kube Scheduler
   port: 10259
@@ -409,6 +441,10 @@ kube-state-metrics:
   # @section -- kube-state-metrics
   extraDiscoveryRules: ""
 
+  # -- The value for the job label.
+  # @section -- kube-state-metrics
+  jobLabel: "integrations/kubernetes/kube-state-metrics"
+
   # -- Rule blocks to be added to the prometheus.relabel component for kube-state-metrics metrics.
   # These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present.
   # ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block))
@@ -507,6 +543,10 @@ node-exporter:
   # @section -- Node Exporter
   extraDiscoveryRules: ""
 
+  # -- The value for the job label.
+  # @section -- Node Exporter
+  jobLabel: "integrations/node_exporter"
+
   # -- Rule blocks to be added to the prometheus.relabel component for Node Exporter metrics.
   # These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present.
   # ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block))
@@ -611,6 +651,10 @@ windows-exporter:
   # @section -- Windows Exporter
   extraDiscoveryRules: ""
 
+  # -- The value for the job label.
+  # @section -- Windows Exporter
+  jobLabel: "integrations/windows-exporter"
+
   # -- Rule blocks to be added to the prometheus.relabel component for Windows Exporter metrics.
   # These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present.
   # ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block))
@@ -680,6 +724,10 @@ kepler:
   # @section -- Kepler
   extraDiscoveryRules: ""
 
+  # -- The value for the job label.
+  # @section -- Kepler
+  jobLabel: "integrations/kepler"
+
   # -- Rule blocks to be added to the prometheus.relabel component for Kepler. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block))
   # These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no __meta* labels are present.
   # @section -- Kepler
@@ -754,6 +802,10 @@ opencost:
   # @section -- OpenCost
   extraDiscoveryRules: ""
 
+  # -- The value for the job label.
+  # @section -- OpenCost
+  jobLabel: "integrations/opencost"
+
   # -- Rule blocks to be added to the prometheus.relabel component for OpenCost.
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block)) # These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no __meta* labels are present. # @section -- OpenCost diff --git a/charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-metrics.alloy index 7082f8e0c..f1152c687 100644 --- a/charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-metrics.alloy @@ -300,6 +300,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -317,6 +318,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -417,6 +419,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/auth/oauth2/output.yaml b/charts/k8s-monitoring/docs/examples/auth/oauth2/output.yaml index 4d6b642e4..440476a4f 100644 --- a/charts/k8s-monitoring/docs/examples/auth/oauth2/output.yaml +++ b/charts/k8s-monitoring/docs/examples/auth/oauth2/output.yaml @@ -440,6 +440,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -457,6 +458,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -557,6 +559,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy index 6756e889c..6898f9c76 100644 --- a/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy @@ -74,6 +74,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -91,6 +92,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -191,6 +193,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/auth/sigv4/output.yaml b/charts/k8s-monitoring/docs/examples/auth/sigv4/output.yaml index b7c05472a..a17b4aa5c 100644 --- a/charts/k8s-monitoring/docs/examples/auth/sigv4/output.yaml +++ b/charts/k8s-monitoring/docs/examples/auth/sigv4/output.yaml @@ -183,6 +183,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -200,6 +201,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -300,6 +302,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/collector-storage/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/collector-storage/alloy-metrics.alloy index 8fcfcb32b..d6c835b77 100644 --- a/charts/k8s-monitoring/docs/examples/collector-storage/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/collector-storage/alloy-metrics.alloy @@ -64,6 +64,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -81,6 +82,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -181,6 +183,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/collector-storage/output.yaml b/charts/k8s-monitoring/docs/examples/collector-storage/output.yaml index 59093e70c..8386665b0 100644 --- a/charts/k8s-monitoring/docs/examples/collector-storage/output.yaml +++ b/charts/k8s-monitoring/docs/examples/collector-storage/output.yaml @@ -178,6 +178,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -195,6 +196,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -295,6 +297,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/destinations/otlp-endpoint/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/destinations/otlp-endpoint/alloy-metrics.alloy index 32905d1f7..52f83924f 100644 --- a/charts/k8s-monitoring/docs/examples/destinations/otlp-endpoint/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/destinations/otlp-endpoint/alloy-metrics.alloy @@ -86,6 +86,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -103,6 +104,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -203,6 +205,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/destinations/otlp-endpoint/output.yaml b/charts/k8s-monitoring/docs/examples/destinations/otlp-endpoint/output.yaml index 7b777b70e..a41a947ef 100644 --- a/charts/k8s-monitoring/docs/examples/destinations/otlp-endpoint/output.yaml +++ b/charts/k8s-monitoring/docs/examples/destinations/otlp-endpoint/output.yaml @@ -211,6 +211,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -228,6 +229,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -328,6 +330,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/extra-rules/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/extra-rules/alloy-metrics.alloy index 4f9d78a0c..99d7b4df9 100644 --- a/charts/k8s-monitoring/docs/examples/extra-rules/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/extra-rules/alloy-metrics.alloy @@ -73,6 +73,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -90,6 +91,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -190,6 +192,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml b/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml index 1a9b019aa..680f93507 100644 --- a/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml +++ b/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml @@ -203,6 +203,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -220,6 +221,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -320,6 +322,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-metrics.alloy index ae8626a50..c6dcc71ec 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-metrics.alloy @@ -64,6 +64,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -81,6 +82,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -162,6 +164,7 @@ declare "cluster_metrics" { kubernetes.apiserver "scrape" { clustering = true + job_label = "integrations/kubernetes/kube-apiserver" scrape_interval = "60s" max_cache_size = 100000 forward_to = argument.metrics_destinations.value @@ -204,6 +207,7 @@ declare "cluster_metrics" { kubernetes.kube_dns "scrape" { clustering = true + job_label = "integrations/kubernetes/kube-dns" scrape_interval = "60s" max_cache_size = 100000 forward_to = argument.metrics_destinations.value @@ -296,6 +300,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml index 4ffd291c5..ab7c849f3 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml @@ -194,6 +194,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -211,6 +212,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -292,6 +294,7 @@ data: kubernetes.apiserver "scrape" { clustering = true + job_label = "integrations/kubernetes/kube-apiserver" scrape_interval = "60s" max_cache_size = 100000 forward_to = argument.metrics_destinations.value @@ -334,6 +337,7 @@ data: kubernetes.kube_dns "scrape" { clustering = true + job_label = "integrations/kubernetes/kube-dns" scrape_interval = "60s" max_cache_size = 100000 forward_to = argument.metrics_destinations.value @@ -426,6 +430,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/alloy-metrics.alloy index 99f33ddac..1a8345134 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/alloy-metrics.alloy @@ -64,6 +64,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -81,6 +82,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -181,6 +183,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/output.yaml index c95dc20cc..091b0a78a 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/output.yaml @@ -190,6 +190,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -207,6 +208,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -307,6 +309,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/istio-service-mesh/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/istio-service-mesh/alloy-metrics.alloy index 861e167d5..29465dee3 100644 --- a/charts/k8s-monitoring/docs/examples/istio-service-mesh/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/istio-service-mesh/alloy-metrics.alloy @@ -273,6 +273,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -290,6 +291,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -390,6 +392,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/istio-service-mesh/output.yaml b/charts/k8s-monitoring/docs/examples/istio-service-mesh/output.yaml index bf2a7dce7..94ff215a3 100644 --- a/charts/k8s-monitoring/docs/examples/istio-service-mesh/output.yaml +++ b/charts/k8s-monitoring/docs/examples/istio-service-mesh/output.yaml @@ -371,6 +371,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -388,6 +389,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -488,6 +490,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-singleton.alloy index ff7c33682..fb3699bef 100644 --- a/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/meta-monitoring/alloy-singleton.alloy @@ -81,6 +81,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -186,6 +187,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering 
= true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/meta-monitoring/output.yaml b/charts/k8s-monitoring/docs/examples/meta-monitoring/output.yaml index 6f72fa1fe..feacdb878 100644 --- a/charts/k8s-monitoring/docs/examples/meta-monitoring/output.yaml +++ b/charts/k8s-monitoring/docs/examples/meta-monitoring/output.yaml @@ -123,6 +123,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -228,6 +229,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/metrics-tuning/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/metrics-tuning/alloy-metrics.alloy index ede55164b..da3378709 100644 --- a/charts/k8s-monitoring/docs/examples/metrics-tuning/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/metrics-tuning/alloy-metrics.alloy @@ -283,6 +283,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|kubelet_node_name|kubernetes_build_info" scrape_interval = "60s" max_cache_size = 100000 @@ -300,6 +301,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -400,6 +402,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" scheme = "http" scrape_interval = "60s" max_cache_size = 100000 diff --git a/charts/k8s-monitoring/docs/examples/metrics-tuning/output.yaml b/charts/k8s-monitoring/docs/examples/metrics-tuning/output.yaml index 2ccaed3f2..03e2076e2 100644 --- a/charts/k8s-monitoring/docs/examples/metrics-tuning/output.yaml +++ b/charts/k8s-monitoring/docs/examples/metrics-tuning/output.yaml @@ -381,6 +381,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|kubelet_node_name|kubernetes_build_info" scrape_interval = "60s" max_cache_size = 100000 @@ -398,6 +399,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -498,6 +500,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" scheme = "http" scrape_interval = "60s" max_cache_size = 100000 diff --git a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-metrics.alloy index 1702bfcf2..a947e06f3 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-metrics.alloy @@ -64,6 +64,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -81,6 +82,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -181,6 +183,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml index a10c23a2b..dd4e07341 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml @@ -194,6 +194,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -211,6 +212,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -311,6 +313,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-metrics.alloy index 87a32de93..718098f42 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-metrics.alloy @@ -64,6 +64,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -81,6 +82,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -181,6 +183,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml index 48bb925e9..4d86d8d8b 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml @@ -177,6 +177,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -194,6 +195,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -294,6 +296,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-metrics.alloy index 8f37c9738..9dcc51cff 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-metrics.alloy @@ -64,6 +64,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -81,6 +82,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -181,6 +183,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml index f3e50d244..1757c894a 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml @@ -177,6 +177,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -194,6 +195,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -294,6 +296,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-metrics.alloy index e55e0ac7a..59d34fba4 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-metrics.alloy @@ -64,6 +64,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -81,6 +82,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -180,6 +182,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "https" bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" diff --git a/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml index f05a05313..3b3240293 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml @@ -173,6 +173,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -190,6 +191,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -289,6 +291,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "https" bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" diff --git a/charts/k8s-monitoring/docs/examples/private-image-registries/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/private-image-registries/alloy-metrics.alloy index 24393cee2..7ededde28 100644 --- a/charts/k8s-monitoring/docs/examples/private-image-registries/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/private-image-registries/alloy-metrics.alloy @@ -64,6 +64,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -81,6 +82,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -181,6 +183,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/private-image-registries/output.yaml b/charts/k8s-monitoring/docs/examples/private-image-registries/output.yaml index ee87677b4..89464617a 100644 --- a/charts/k8s-monitoring/docs/examples/private-image-registries/output.yaml +++ b/charts/k8s-monitoring/docs/examples/private-image-registries/output.yaml @@ -184,6 +184,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -201,6 +202,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -301,6 +303,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/proxies/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/proxies/alloy-metrics.alloy index 659ba5c8e..c0b4a0847 100644 --- a/charts/k8s-monitoring/docs/examples/proxies/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/proxies/alloy-metrics.alloy @@ -65,6 +65,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -82,6 +83,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -182,6 +184,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/proxies/output.yaml b/charts/k8s-monitoring/docs/examples/proxies/output.yaml index 5685d9908..c155830ed 100644 --- a/charts/k8s-monitoring/docs/examples/proxies/output.yaml +++ b/charts/k8s-monitoring/docs/examples/proxies/output.yaml @@ -227,6 +227,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -244,6 +245,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -344,6 +346,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/scalability/autoscaling/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/scalability/autoscaling/alloy-metrics.alloy index e884d23ec..f48771bae 100644 --- a/charts/k8s-monitoring/docs/examples/scalability/autoscaling/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/scalability/autoscaling/alloy-metrics.alloy @@ -64,6 +64,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -81,6 +82,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -181,6 +183,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/scalability/autoscaling/output.yaml b/charts/k8s-monitoring/docs/examples/scalability/autoscaling/output.yaml index 445b05064..c3e723bf8 100644 --- a/charts/k8s-monitoring/docs/examples/scalability/autoscaling/output.yaml +++ b/charts/k8s-monitoring/docs/examples/scalability/autoscaling/output.yaml @@ -162,6 +162,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -179,6 +180,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -279,6 +281,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/scalability/sharded-kube-state-metrics/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/scalability/sharded-kube-state-metrics/alloy-metrics.alloy index c2205564c..17f5e85b5 100644 --- a/charts/k8s-monitoring/docs/examples/scalability/sharded-kube-state-metrics/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/scalability/sharded-kube-state-metrics/alloy-metrics.alloy @@ -64,6 +64,7 @@ declare "cluster_metrics" { kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -81,6 +82,7 @@ declare "cluster_metrics" { kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -181,6 +183,7 @@ declare "cluster_metrics" { kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/docs/examples/scalability/sharded-kube-state-metrics/output.yaml b/charts/k8s-monitoring/docs/examples/scalability/sharded-kube-state-metrics/output.yaml index 414c1d5b7..dfb7d7447 100644 --- a/charts/k8s-monitoring/docs/examples/scalability/sharded-kube-state-metrics/output.yaml +++ b/charts/k8s-monitoring/docs/examples/scalability/sharded-kube-state-metrics/output.yaml @@ -162,6 +162,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -179,6 +180,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -279,6 +281,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/tests/integration/auth/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/auth/.rendered/output.yaml index 1836db1e9..b21018d26 100644 --- a/charts/k8s-monitoring/tests/integration/auth/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/integration/auth/.rendered/output.yaml @@ -490,6 +490,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 diff --git a/charts/k8s-monitoring/tests/integration/cluster-monitoring/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/cluster-monitoring/.rendered/output.yaml index a147489bb..e53cd0c3c 100644 --- a/charts/k8s-monitoring/tests/integration/cluster-monitoring/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/integration/cluster-monitoring/.rendered/output.yaml @@ -254,6 +254,7 @@ data: kubernetes.kubelet "scrape" 
{ clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -271,6 +272,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -371,6 +373,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/tests/integration/control-plane-monitoring/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/control-plane-monitoring/.rendered/output.yaml index dd8236206..72905b41e 100644 --- a/charts/k8s-monitoring/tests/integration/control-plane-monitoring/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/integration/control-plane-monitoring/.rendered/output.yaml @@ -206,6 +206,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -223,6 +224,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -304,6 +306,7 @@ data: kubernetes.apiserver "scrape" { clustering = true + job_label = "integrations/kubernetes/kube-apiserver" scrape_interval = "60s" max_cache_size = 100000 forward_to = argument.metrics_destinations.value @@ -346,6 +349,7 @@ data: kubernetes.kube_dns "scrape" { clustering = true + job_label = "integrations/kubernetes/kube-dns" scrape_interval = "60s" max_cache_size = 100000 forward_to = argument.metrics_destinations.value @@ -438,6 +442,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/tests/integration/integration-grafana/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/integration-grafana/.rendered/output.yaml index 2242d3752..f1cee3970 100644 --- a/charts/k8s-monitoring/tests/integration/integration-grafana/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/integration/integration-grafana/.rendered/output.yaml @@ -226,6 +226,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -243,6 +244,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -343,6 +345,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/tests/integration/integration-loki/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/integration-loki/.rendered/output.yaml index cbb25efff..366a07625 100644 --- a/charts/k8s-monitoring/tests/integration/integration-loki/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/integration/integration-loki/.rendered/output.yaml @@ -226,6 +226,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -243,6 +244,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -343,6 +345,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/tests/integration/istio-service-mesh/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/istio-service-mesh/.rendered/output.yaml index e92e0759b..c494eab10 100644 --- a/charts/k8s-monitoring/tests/integration/istio-service-mesh/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/integration/istio-service-mesh/.rendered/output.yaml @@ -435,6 +435,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -452,6 +453,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -552,6 +554,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/tests/integration/prometheus-io-annotations/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/prometheus-io-annotations/.rendered/output.yaml index aa44e3ec6..452e3ab85 100644 --- a/charts/k8s-monitoring/tests/integration/prometheus-io-annotations/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/integration/prometheus-io-annotations/.rendered/output.yaml @@ -371,6 +371,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -388,6 +389,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -488,6 +490,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/tests/integration/sharded-kube-state-metrics/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/sharded-kube-state-metrics/.rendered/output.yaml index 8637b89dd..b218f6f0d 100644 --- a/charts/k8s-monitoring/tests/integration/sharded-kube-state-metrics/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/integration/sharded-kube-state-metrics/.rendered/output.yaml @@ -371,6 +371,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = 
"up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -388,6 +389,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = "up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -488,6 +490,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/tests/platform/eks-with-windows/.rendered/output.yaml b/charts/k8s-monitoring/tests/platform/eks-with-windows/.rendered/output.yaml index 65029a673..983312e99 100644 --- a/charts/k8s-monitoring/tests/platform/eks-with-windows/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/platform/eks-with-windows/.rendered/output.yaml @@ -207,6 +207,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -224,6 +225,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -324,6 +326,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/tests/platform/gke-autopilot/.rendered/output.yaml b/charts/k8s-monitoring/tests/platform/gke-autopilot/.rendered/output.yaml index ef815f898..3ad336dd4 100644 --- a/charts/k8s-monitoring/tests/platform/gke-autopilot/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/platform/gke-autopilot/.rendered/output.yaml @@ -161,6 +161,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = 
"up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s" diff --git a/charts/k8s-monitoring/tests/platform/grafana-cloud-features/k8s-monitoring/.rendered/output.yaml b/charts/k8s-monitoring/tests/platform/grafana-cloud-features/k8s-monitoring/.rendered/output.yaml index 7cfbd9d65..0b4491257 100644 --- a/charts/k8s-monitoring/tests/platform/grafana-cloud-features/k8s-monitoring/.rendered/output.yaml +++ b/charts/k8s-monitoring/tests/platform/grafana-cloud-features/k8s-monitoring/.rendered/output.yaml @@ -235,6 +235,7 @@ data: kubernetes.kubelet "scrape" { clustering = true + job_label = "integrations/kubernetes/kubelet" keep_metrics = "up|scrape_samples_scraped|go_goroutines|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_free|kubelet_volume_stats_inodes_used|kubelet_volume_stats_used_bytes|kubernetes_build_info|namespace_workload_pod|process_cpu_seconds_total|process_resident_memory_bytes|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" scrape_interval = "60s" max_cache_size = 100000 @@ -252,6 +253,7 @@ data: kubernetes.cadvisor "scrape" { clustering = true + job_label = "integrations/kubernetes/cadvisor" keep_metrics = 
"up|scrape_samples_scraped|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" scrape_interval = "60s" max_cache_size = 100000 @@ -352,6 +354,7 @@ data: kube_state_metrics.scrape "metrics" { targets = kube_state_metrics.kubernetes.targets.output clustering = true + job_label = "integrations/kubernetes/kube-state-metrics" keep_metrics = "up|scrape_samples_scraped|kube_configmap_info|kube_configmap_metadata_resource_version|kube_daemonset.*|kube_deployment_metadata_generation|kube_deployment_spec_replicas|kube_deployment_status_condition|kube_deployment_status_observed_generation|kube_deployment_status_replicas_available|kube_deployment_status_replicas_updated|kube_horizontalpodautoscaler_spec_max_replicas|kube_horizontalpodautoscaler_spec_min_replicas|kube_horizontalpodautoscaler_status_current_replicas|kube_horizontalpodautoscaler_status_desired_replicas|kube_job.*|kube_namespace_status_phase|kube_node.*|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_access_mode|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_labels|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_status_phase|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_requests|kube_pod_container_status_last_terminated_reason|kube_pod_container_status_restarts_total|kube_pod_container_status_waiting_reason|kube_pod_info|kube_pod_owner|kube_pod_spec_volumes_persistentvolumeclaims_info|kube_pod_start_time|kube_pod_status_phase|kube_pod_status_reason|kube_replicaset.*|kube_resourcequota|kube_secret_metadata_resource_version|kube_statefulset.*" scheme = "http" scrape_interval = "60s"