diff --git a/examples/application-observability/metrics.alloy b/examples/application-observability/metrics.alloy
index 73bc2b832..eca82b8e2 100644
--- a/examples/application-observability/metrics.alloy
+++ b/examples/application-observability/metrics.alloy
@@ -757,6 +757,18 @@ prometheus.remote_write "metrics_service" {
     }
 
     send_native_histograms = false
+
+    queue_config {
+      capacity = 10000
+      min_shards = 1
+      max_shards = 50
+      max_samples_per_send = 2000
+      batch_send_deadline = "5s"
+      min_backoff = "30ms"
+      max_backoff = "5s"
+      retry_on_http_429 = true
+      sample_age_limit = "0s"
+    }
   }
 
   wal {
diff --git a/examples/application-observability/output.yaml b/examples/application-observability/output.yaml
index 082c44208..31de8ad6d 100644
--- a/examples/application-observability/output.yaml
+++ b/examples/application-observability/output.yaml
@@ -100,13 +100,13 @@ metadata:
   name: k8smon-prometheus-node-exporter
   namespace: default
   labels:
-    helm.sh/chart: prometheus-node-exporter-4.34.0
+    helm.sh/chart: prometheus-node-exporter-4.36.0
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: metrics
     app.kubernetes.io/part-of: prometheus-node-exporter
     app.kubernetes.io/name: prometheus-node-exporter
     app.kubernetes.io/instance: k8smon
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.8.1"
 ---
 # Source: k8s-monitoring/templates/log-service-credentials.yaml
 apiVersion: v1
@@ -927,6 +927,18 @@ data:
         }
 
         send_native_histograms = false
+
+        queue_config {
+          capacity = 10000
+          min_shards = 1
+          max_shards = 50
+          max_samples_per_send = 2000
+          batch_send_deadline = "5s"
+          min_backoff = "30ms"
+          max_backoff = "5s"
+          retry_on_http_429 = true
+          sample_age_limit = "0s"
+        }
       }
 
       wal {
@@ -2077,7 +2089,7 @@ data:
   metrics.prom: |
     # HELP grafana_kubernetes_monitoring_build_info A metric to report the version of the Kubernetes Monitoring Helm chart as well as a summary of enabled features
     # TYPE grafana_kubernetes_monitoring_build_info gauge
-    grafana_kubernetes_monitoring_build_info{version="1.0.13", namespace="default", metrics="enabled,alloy,autoDiscover,kube-state-metrics,node-exporter,kubelet,cadvisor,cost", logs="enabled,events,pod_logs", traces="enabled", deployments="kube-state-metrics,prometheus-node-exporter,prometheus-operator-crds,opencost"} 1
+    grafana_kubernetes_monitoring_build_info{version="1.1.0", namespace="default", metrics="enabled,alloy,autoDiscover,kube-state-metrics,node-exporter,kubelet,cadvisor,cost", logs="enabled,events,pod_logs", traces="enabled", deployments="kube-state-metrics,prometheus-node-exporter,prometheus-operator-crds,opencost"} 1
 ---
 # Source: k8s-monitoring/charts/prometheus-operator-crds/charts/crds/templates/crd-alertmanagerconfigs.yaml
 apiVersion: apiextensions.k8s.io/v1
@@ -52875,13 +52887,13 @@ metadata:
   name: k8smon-prometheus-node-exporter
   namespace: default
   labels:
-    helm.sh/chart: prometheus-node-exporter-4.34.0
+    helm.sh/chart: prometheus-node-exporter-4.36.0
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: metrics
     app.kubernetes.io/part-of: prometheus-node-exporter
     app.kubernetes.io/name: prometheus-node-exporter
     app.kubernetes.io/instance: k8smon
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.8.1"
 spec:
   type: ClusterIP
   ports:
@@ -53132,13 +53144,13 @@ metadata:
   name: k8smon-prometheus-node-exporter
   namespace: default
   labels:
-    helm.sh/chart: prometheus-node-exporter-4.34.0
+    helm.sh/chart: prometheus-node-exporter-4.36.0
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: metrics
     app.kubernetes.io/part-of: prometheus-node-exporter
     app.kubernetes.io/name: prometheus-node-exporter
     app.kubernetes.io/instance: k8smon
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.8.1"
 spec:
   selector:
     matchLabels:
@@ -53154,13 +53166,13 @@ spec:
       annotations:
         cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
       labels:
-        helm.sh/chart: prometheus-node-exporter-4.34.0
+        helm.sh/chart: prometheus-node-exporter-4.36.0
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/component: metrics
         app.kubernetes.io/part-of: prometheus-node-exporter
         app.kubernetes.io/name: prometheus-node-exporter
         app.kubernetes.io/instance: k8smon
-        app.kubernetes.io/version: "1.8.0"
+        app.kubernetes.io/version: "1.8.1"
     spec:
       automountServiceAccountToken: false
       securityContext:
@@ -53171,7 +53183,7 @@ spec:
       serviceAccountName: k8smon-prometheus-node-exporter
       containers:
         - name: node-exporter
-          image: quay.io/prometheus/node-exporter:v1.8.0
+          image: quay.io/prometheus/node-exporter:v1.8.1
           imagePullPolicy: IfNotPresent
           args:
             - --path.procfs=/host/proc
@@ -53681,8 +53693,8 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: "Helm"
     app.kubernetes.io/instance: "k8smon"
-    app.kubernetes.io/version: 2.3.4
-    helm.sh/chart: "k8s-monitoring-1.0.13"
+    app.kubernetes.io/version: 2.3.5
+    helm.sh/chart: "k8s-monitoring-1.1.0"
   annotations:
     "helm.sh/hook": pre-install,pre-upgrade
     "helm.sh/hook-weight": "-5"
@@ -54448,6 +54460,18 @@ data:
         }
 
         send_native_histograms = false
+
+        queue_config {
+          capacity = 10000
+          min_shards = 1
+          max_shards = 50
+          max_samples_per_send = 2000
+          batch_send_deadline = "5s"
+          min_backoff = "30ms"
+          max_backoff = "5s"
+          retry_on_http_429 = true
+          sample_age_limit = "0s"
+        }
       }
 
       wal {
@@ -55573,8 +55597,8 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: "Helm"
     app.kubernetes.io/instance: "k8smon"
-    app.kubernetes.io/version: 2.3.4
-    helm.sh/chart: "k8s-monitoring-1.0.13"
+    app.kubernetes.io/version: 2.3.5
+    helm.sh/chart: "k8s-monitoring-1.1.0"
   annotations:
     "helm.sh/hook": test
     "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
@@ -55627,8 +55651,8 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: "Helm"
     app.kubernetes.io/instance: "k8smon"
-    app.kubernetes.io/version: 2.3.4
-    helm.sh/chart: "k8s-monitoring-1.0.13"
+    app.kubernetes.io/version: 2.3.5
+    helm.sh/chart: "k8s-monitoring-1.1.0"
   annotations:
     "helm.sh/hook": pre-install,pre-upgrade
     "helm.sh/hook-weight": "-5"
@@ -55706,8 +55730,8 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: "Helm"
     app.kubernetes.io/instance: "k8smon"
-    app.kubernetes.io/version: 2.3.4
-    helm.sh/chart: "k8s-monitoring-1.0.13"
+    app.kubernetes.io/version: 2.3.5
+    helm.sh/chart: "k8s-monitoring-1.1.0"
   annotations:
     "helm.sh/hook": test
     "helm.sh/hook-delete-policy": before-hook-creation
@@ -55718,7 +55742,7 @@ spec:
     kubernetes.io/os: linux
   containers:
     - name: config-analysis
-      image: ghcr.io/grafana/k8s-monitoring-test:1.0.13
+      image: ghcr.io/grafana/k8s-monitoring-test:1.1.0
      command: [/etc/bin/config-analysis.sh]
      env:
        - name: ALLOY_HOST
@@ -55733,8 +55757,8 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: "Helm"
     app.kubernetes.io/instance: "k8smon"
-    app.kubernetes.io/version: 2.3.4
-    helm.sh/chart: "k8s-monitoring-1.0.13"
+    app.kubernetes.io/version: 2.3.5
+    helm.sh/chart: "k8s-monitoring-1.1.0"
   annotations:
     "helm.sh/hook": test
     "helm.sh/hook-delete-policy": before-hook-creation
@@ -55750,14 +55774,14 @@ spec:
       labels:
         app.kubernetes.io/managed-by: "Helm"
         app.kubernetes.io/instance: "k8smon"
-        helm.sh/chart: "k8s-monitoring-1.0.13"
+        helm.sh/chart: "k8s-monitoring-1.1.0"
     spec:
       restartPolicy: Never
       nodeSelector:
         kubernetes.io/os: linux
       containers:
        - name: query-test
-          image: ghcr.io/grafana/k8s-monitoring-test:1.0.13
+          image: ghcr.io/grafana/k8s-monitoring-test:1.1.0
          command: ["bash", "-c", "/etc/bin/query-test.sh /etc/test/testQueries.json"]
          volumeMounts:
            - name: test-files
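
Reviewer note on the new block (not part of the diff itself): queue_config is the
standard tuning block of Grafana Alloy's prometheus.remote_write endpoint, and the
attribute names below are exactly those added above. The inline comments are
explanatory glosses, on the assumption that Alloy follows the upstream Prometheus
remote-write queue semantics for these settings:

    queue_config {
      capacity             = 10000  // samples buffered per shard before WAL reading blocks
      min_shards           = 1      // fewest concurrent send shards
      max_shards           = 50     // most shards the queue may scale up to under load
      max_samples_per_send = 2000   // cap on samples per remote-write request
      batch_send_deadline  = "5s"   // flush a partial batch after waiting this long
      min_backoff          = "30ms" // initial retry delay after a failed send
      max_backoff          = "5s"   // ceiling for the exponential retry backoff
      retry_on_http_429    = true   // treat HTTP 429 responses as retryable
      sample_age_limit     = "0s"   // drop samples older than this; "0s" disables the limit
    }

In practice this trades memory (capacity, max_shards) for delivery throughput and
makes 429-rate-limited sends retry instead of dropping data.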