From f1a221e49de4f5cd8eae8c03544bd95be42380b7 Mon Sep 17 00:00:00 2001 From: Pete Wall Date: Sat, 21 Dec 2024 13:21:58 -0600 Subject: [PATCH] Fix the type of endpoint params (#1029) Signed-off-by: Pete Wall --- .../destinations/loki-values.yaml | 4 +- .../destinations/otlp-values.yaml | 4 +- .../destinations/prometheus-values.yaml | 4 +- .../destinations/pyroscope-values.yaml | 4 +- .../k8s-monitoring/docs/destinations/loki.md | 4 +- .../k8s-monitoring/docs/destinations/otlp.md | 4 +- .../docs/destinations/prometheus.md | 4 +- .../docs/destinations/pyroscope.md | 4 +- .../docs/examples/auth/oauth2/README.md | 56 + .../examples/auth/oauth2/alloy-logs.alloy | 371 ++ .../examples/auth/oauth2/alloy-metrics.alloy | 540 +++ .../auth/oauth2/alloy-singleton.alloy | 196 + .../docs/examples/auth/oauth2/description.txt | 3 + .../docs/examples/auth/oauth2/output.yaml | 4124 +++++++++++++++++ .../docs/examples/auth/oauth2/values.yaml | 44 + .../schema-mods/types-and-enums.json | 32 +- .../destinations/_destination_loki.tpl | 2 +- .../destinations/_destination_otlp.tpl | 2 +- .../destinations/_destination_prometheus.tpl | 2 +- .../destinations/_destination_pyroscope.tpl | 2 +- charts/k8s-monitoring/values.schema.json | 52 + 21 files changed, 5434 insertions(+), 24 deletions(-) create mode 100644 charts/k8s-monitoring/docs/examples/auth/oauth2/README.md create mode 100644 charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-logs.alloy create mode 100644 charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-metrics.alloy create mode 100644 charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-singleton.alloy create mode 100644 charts/k8s-monitoring/docs/examples/auth/oauth2/description.txt create mode 100644 charts/k8s-monitoring/docs/examples/auth/oauth2/output.yaml create mode 100644 charts/k8s-monitoring/docs/examples/auth/oauth2/values.yaml diff --git a/charts/k8s-monitoring/destinations/loki-values.yaml b/charts/k8s-monitoring/destinations/loki-values.yaml index 6df6771be..117304381 100644 --- a/charts/k8s-monitoring/destinations/loki-values.yaml +++ b/charts/k8s-monitoring/destinations/loki-values.yaml @@ -94,7 +94,7 @@ auth: # -- Raw config for accessing the client ID # @section -- Authentication - OAuth2 clientIdFrom: "" - # -- Prometheus OAuth2 client secret + # -- OAuth2 client secret # @section -- Authentication - OAuth2 clientSecret: "" # -- The key for the client secret property in the secret @@ -106,7 +106,7 @@ auth: # -- File containing the OAuth2 client secret. # @section -- Authentication - OAuth2 clientSecretFile: "" - # -- Prometheus OAuth2 endpoint parameters + # -- OAuth2 endpoint parameters # @section -- Authentication - OAuth2 endpointParams: {} # -- HTTP proxy to send requests through. diff --git a/charts/k8s-monitoring/destinations/otlp-values.yaml b/charts/k8s-monitoring/destinations/otlp-values.yaml index 382efef79..32e9ca131 100644 --- a/charts/k8s-monitoring/destinations/otlp-values.yaml +++ b/charts/k8s-monitoring/destinations/otlp-values.yaml @@ -104,7 +104,7 @@ auth: # -- Raw config for accessing the client ID # @section -- Authentication - OAuth2 clientIdFrom: "" - # -- Prometheus OAuth2 client secret + # -- OAuth2 client secret # @section -- Authentication - OAuth2 clientSecret: "" # -- The key for the client secret property in the secret @@ -116,7 +116,7 @@ auth: # -- File containing the OAuth2 client secret. 
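# For example, this can point at a projected service account token such as
# /var/run/secrets/kubernetes.io/serviceaccount/token (as this chart's oauth2 example does)
# when the identity provider accepts JWT client assertions.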
# @section -- Authentication - OAuth2 clientSecretFile: "" - # -- Prometheus OAuth2 endpoint parameters + # -- OAuth2 endpoint parameters # @section -- Authentication - OAuth2 endpointParams: {} # -- HTTP proxy to send requests through. diff --git a/charts/k8s-monitoring/destinations/prometheus-values.yaml b/charts/k8s-monitoring/destinations/prometheus-values.yaml index 76a5e7ce7..58fd30e71 100644 --- a/charts/k8s-monitoring/destinations/prometheus-values.yaml +++ b/charts/k8s-monitoring/destinations/prometheus-values.yaml @@ -103,7 +103,7 @@ auth: # -- Raw config for accessing the client ID # @section -- Authentication - OAuth2 clientIdFrom: "" - # -- Prometheus OAuth2 client secret + # -- OAuth2 client secret # @section -- Authentication - OAuth2 clientSecret: "" # -- The key for the client secret property in the secret @@ -115,7 +115,7 @@ auth: # -- File containing the OAuth2 client secret. # @section -- Authentication - OAuth2 clientSecretFile: "" - # -- Prometheus OAuth2 endpoint parameters + # -- OAuth2 endpoint parameters # @section -- Authentication - OAuth2 endpointParams: {} # -- HTTP proxy to send requests through. diff --git a/charts/k8s-monitoring/destinations/pyroscope-values.yaml b/charts/k8s-monitoring/destinations/pyroscope-values.yaml index 900ff207e..376893aa2 100644 --- a/charts/k8s-monitoring/destinations/pyroscope-values.yaml +++ b/charts/k8s-monitoring/destinations/pyroscope-values.yaml @@ -82,7 +82,7 @@ auth: # -- Raw config for accessing the client ID # @section -- Authentication - OAuth2 clientIdFrom: "" - # -- Prometheus OAuth2 client secret + # -- OAuth2 client secret # @section -- Authentication - OAuth2 clientSecret: "" # -- The key for the client secret property in the secret @@ -94,7 +94,7 @@ auth: # -- File containing the OAuth2 client secret. # @section -- Authentication - OAuth2 clientSecretFile: "" - # -- Prometheus OAuth2 endpoint parameters + # -- OAuth2 endpoint parameters # @section -- Authentication - OAuth2 endpointParams: {} # -- HTTP proxy to send requests through. diff --git a/charts/k8s-monitoring/docs/destinations/loki.md b/charts/k8s-monitoring/docs/destinations/loki.md index a7dc3d607..9d54d19ea 100644 --- a/charts/k8s-monitoring/docs/destinations/loki.md +++ b/charts/k8s-monitoring/docs/destinations/loki.md @@ -20,11 +20,11 @@ This defines the options for defining a destination for logs that use the Loki p | auth.oauth2.clientId | string | `""` | OAuth2 client ID | | auth.oauth2.clientIdFrom | string | `""` | Raw config for accessing the client ID | | auth.oauth2.clientIdKey | string | `"clientId"` | The key for the client ID property in the secret | -| auth.oauth2.clientSecret | string | `""` | Prometheus OAuth2 client secret | +| auth.oauth2.clientSecret | string | `""` | OAuth2 client secret | | auth.oauth2.clientSecretFile | string | `""` | File containing the OAuth2 client secret. | | auth.oauth2.clientSecretFrom | string | `""` | Raw config for accessing the client secret | | auth.oauth2.clientSecretKey | string | `"clientSecret"` | The key for the client secret property in the secret | -| auth.oauth2.endpointParams | object | `{}` | Prometheus OAuth2 endpoint parameters | +| auth.oauth2.endpointParams | object | `{}` | OAuth2 endpoint parameters | | auth.oauth2.noProxy | string | `""` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | auth.oauth2.proxyConnectHeader | object | `{}` | Specifies headers to send to proxies during CONNECT requests. 
| | auth.oauth2.proxyFromEnvironment | bool | `false` | Use the proxy URL indicated by environment variables. | diff --git a/charts/k8s-monitoring/docs/destinations/otlp.md b/charts/k8s-monitoring/docs/destinations/otlp.md index 5285af6ae..331942d71 100644 --- a/charts/k8s-monitoring/docs/destinations/otlp.md +++ b/charts/k8s-monitoring/docs/destinations/otlp.md @@ -20,11 +20,11 @@ This defines the options for defining a destination for OpenTelemetry data that | auth.oauth2.clientId | string | `""` | OAuth2 client ID | | auth.oauth2.clientIdFrom | string | `""` | Raw config for accessing the client ID | | auth.oauth2.clientIdKey | string | `"clientId"` | The key for the client ID property in the secret | -| auth.oauth2.clientSecret | string | `""` | Prometheus OAuth2 client secret | +| auth.oauth2.clientSecret | string | `""` | OAuth2 client secret | | auth.oauth2.clientSecretFile | string | `""` | File containing the OAuth2 client secret. | | auth.oauth2.clientSecretFrom | string | `""` | Raw config for accessing the client secret | | auth.oauth2.clientSecretKey | string | `"clientSecret"` | The key for the client secret property in the secret | -| auth.oauth2.endpointParams | object | `{}` | Prometheus OAuth2 endpoint parameters | +| auth.oauth2.endpointParams | object | `{}` | OAuth2 endpoint parameters | | auth.oauth2.noProxy | string | `""` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | auth.oauth2.proxyConnectHeader | object | `{}` | Specifies headers to send to proxies during CONNECT requests. | | auth.oauth2.proxyFromEnvironment | bool | `false` | Use the proxy URL indicated by environment variables. | diff --git a/charts/k8s-monitoring/docs/destinations/prometheus.md b/charts/k8s-monitoring/docs/destinations/prometheus.md index 25e9c8d3b..8e9a7363b 100644 --- a/charts/k8s-monitoring/docs/destinations/prometheus.md +++ b/charts/k8s-monitoring/docs/destinations/prometheus.md @@ -20,11 +20,11 @@ This defines the options for defining a destination for metrics that use the Pro | auth.oauth2.clientId | string | `""` | OAuth2 client ID | | auth.oauth2.clientIdFrom | string | `""` | Raw config for accessing the client ID | | auth.oauth2.clientIdKey | string | `"clientId"` | The key for the client ID property in the secret | -| auth.oauth2.clientSecret | string | `""` | Prometheus OAuth2 client secret | +| auth.oauth2.clientSecret | string | `""` | OAuth2 client secret | | auth.oauth2.clientSecretFile | string | `""` | File containing the OAuth2 client secret. | | auth.oauth2.clientSecretFrom | string | `""` | Raw config for accessing the client secret | | auth.oauth2.clientSecretKey | string | `"clientSecret"` | The key for the client secret property in the secret | -| auth.oauth2.endpointParams | object | `{}` | Prometheus OAuth2 endpoint parameters | +| auth.oauth2.endpointParams | object | `{}` | OAuth2 endpoint parameters | | auth.oauth2.noProxy | string | `""` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | auth.oauth2.proxyConnectHeader | object | `{}` | Specifies headers to send to proxies during CONNECT requests. | | auth.oauth2.proxyFromEnvironment | bool | `false` | Use the proxy URL indicated by environment variables. 
| diff --git a/charts/k8s-monitoring/docs/destinations/pyroscope.md b/charts/k8s-monitoring/docs/destinations/pyroscope.md index 15c6c0955..861f426ee 100644 --- a/charts/k8s-monitoring/docs/destinations/pyroscope.md +++ b/charts/k8s-monitoring/docs/destinations/pyroscope.md @@ -19,11 +19,11 @@ This defines the options for defining a destination for profiles that use the Py | auth.oauth2.clientId | string | `""` | OAuth2 client ID | | auth.oauth2.clientIdFrom | string | `""` | Raw config for accessing the client ID | | auth.oauth2.clientIdKey | string | `"clientId"` | The key for the client ID property in the secret | -| auth.oauth2.clientSecret | string | `""` | Prometheus OAuth2 client secret | +| auth.oauth2.clientSecret | string | `""` | OAuth2 client secret | | auth.oauth2.clientSecretFile | string | `""` | File containing the OAuth2 client secret. | | auth.oauth2.clientSecretFrom | string | `""` | Raw config for accessing the client secret | | auth.oauth2.clientSecretKey | string | `"clientSecret"` | The key for the client secret property in the secret | -| auth.oauth2.endpointParams | object | `{}` | Prometheus OAuth2 endpoint parameters | +| auth.oauth2.endpointParams | object | `{}` | OAuth2 endpoint parameters | | auth.oauth2.noProxy | string | `""` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | auth.oauth2.proxyConnectHeader | object | `{}` | Specifies headers to send to proxies during CONNECT requests. | | auth.oauth2.proxyFromEnvironment | bool | `false` | Use the proxy URL indicated by environment variables. | diff --git a/charts/k8s-monitoring/docs/examples/auth/oauth2/README.md b/charts/k8s-monitoring/docs/examples/auth/oauth2/README.md new file mode 100644 index 000000000..eaecc075e --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/oauth2/README.md @@ -0,0 +1,56 @@ + +# OAuth2 Authentication + +This example demonstrates how to use OAuth2 for authentication. 
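Each value under `endpointParams` is a list of strings, matching the `endpoint_params` map that the generated Alloy configuration passes to `otelcol.auth.oauth2`, so a single parameter can be sent with more than one value. A minimal sketch of the expected shape (the parameter names below are illustrative, not required by the chart):

```yaml
endpointParams:
  audience: ["https://my.api"]   # a single value still goes inside a list
  resource: ["res-a", "res-b"]   # a parameter sent with two values
```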
+

## Values

```yaml
cluster:
  name: oauth2-auth-example

destinations:
  - name: otel-endpoint
    type: otlp
    url: "grpc.my.otel.endpoint:443"
    auth:
      type: oauth2
      oauth2:
        tokenURL: "https://my.idp/application/o/token/"
        clientId: "my-client-id"
        clientSecretFile: "/var/run/secrets/kubernetes.io/serviceaccount/token"
        endpointParams:
          grant_type: ["client_credentials"]
          client_assertion_type: ["urn:ietf:params:oauth:client-assertion-type:jwt-bearer"]
    logs: {enabled: true}
    metrics: {enabled: true}
    traces: {enabled: true}

clusterMetrics:
  enabled: true

clusterEvents:
  enabled: true

podLogs:
  enabled: true

nodeLogs:
  enabled: true

prometheusOperatorObjects:
  enabled: true

annotationAutodiscovery:
  enabled: true

alloy-logs:
  enabled: true
alloy-metrics:
  enabled: true
alloy-singleton:
  enabled: true
```
diff --git a/charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-logs.alloy b/charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-logs.alloy
new file mode 100644
index 000000000..601322651
--- /dev/null
+++ b/charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-logs.alloy
@@ -0,0 +1,371 @@
+// Destination: otel-endpoint (otlp)
otelcol.receiver.prometheus "otel_endpoint" {
  output {
    metrics = [otelcol.processor.attributes.otel_endpoint.input]
  }
}
otelcol.receiver.loki "otel_endpoint" {
  output {
    logs = [otelcol.processor.attributes.otel_endpoint.input]
  }
}
otelcol.auth.oauth2 "otel_endpoint" {
  client_id = nonsensitive(remote.kubernetes.secret.otel_endpoint.data["clientId"])
  client_secret_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
  endpoint_params = {
    client_assertion_type = ["urn:ietf:params:oauth:client-assertion-type:jwt-bearer"],
    grant_type = ["client_credentials"],
  }
  token_url = "https://my.idp/application/o/token/"
}

otelcol.processor.attributes "otel_endpoint" {
  action {
    key = "cluster"
    action = "upsert"
    value = "oauth2-auth-example"
  }
  action {
    key = "k8s.cluster.name"
    action = "upsert"
    value = "oauth2-auth-example"
  }
  output {
    metrics = [otelcol.processor.transform.otel_endpoint.input]
    logs = [otelcol.processor.transform.otel_endpoint.input]
    traces = [otelcol.processor.transform.otel_endpoint.input]
  }
}

otelcol.processor.transform "otel_endpoint" {
  error_mode = "ignore"

  output {
    metrics = [otelcol.processor.batch.otel_endpoint.input]
    logs = [otelcol.processor.batch.otel_endpoint.input]
    traces = [otelcol.processor.batch.otel_endpoint.input]
  }
}

otelcol.processor.batch "otel_endpoint" {
  timeout = "2s"
  send_batch_size = 8192
  send_batch_max_size = 0

  output {
    metrics = [otelcol.exporter.otlp.otel_endpoint.input]
    logs = [otelcol.exporter.otlp.otel_endpoint.input]
    traces = [otelcol.exporter.otlp.otel_endpoint.input]
  }
}
otelcol.exporter.otlp "otel_endpoint" {
  client {
    endpoint = "grpc.my.otel.endpoint:443"
    auth = otelcol.auth.oauth2.otel_endpoint.handler
    tls {
      insecure = false
      insecure_skip_verify = false
    }
  }
}

remote.kubernetes.secret "otel_endpoint" {
  name = "otel-endpoint-k8smon-k8s-monitoring"
  namespace = "default"
}

// Feature: Node Logs
declare "node_logs" {
  argument "logs_destinations" {
    comment = "Must be a list of log destinations where collected logs should be forwarded to"
  }

  loki.relabel "journal" {

    // copy all journal labels and make them available to the pipeline stages as labels; there is a label
    // keep defined to filter out unwanted labels; these pipeline
labels can be set as structured metadata + // as well, the following labels are available: + // - boot_id + // - cap_effective + // - cmdline + // - comm + // - exe + // - gid + // - hostname + // - machine_id + // - pid + // - stream_id + // - systemd_cgroup + // - systemd_invocation_id + // - systemd_slice + // - systemd_unit + // - transport + // - uid + // + // More Info: https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html + rule { + action = "labelmap" + regex = "__journal__(.+)" + } + + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "unit" + } + + // the service_name label will be set automatically in loki if not set, and the unit label + // will not allow service_name to be set automatically. + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "service_name" + } + + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component + } + + loki.source.journal "worker" { + path = "/var/log/journal" + format_as_json = false + max_age = "8h" + relabel_rules = loki.relabel.journal.rules + labels = { + job = "integrations/kubernetes/journal", + instance = sys.env("HOSTNAME"), + } + forward_to = [loki.process.journal_logs.receiver] + } + + loki.process "journal_logs" { + stage.static_labels { + values = { + // add a static source label to the logs so they can be differentiated / restricted if necessary + "source" = "journal", + // default level to unknown + level = "unknown", + } + } + + // Attempt to determine the log level, most k8s workers are either in logfmt or klog formats + // check to see if the log line matches the klog format (https://github.com/kubernetes/klog) + stage.match { + // unescaped regex: ([IWED][0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]+) + selector = "{level=\"unknown\"} |~ \"([IWED][0-9]{4}\\\\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\\\\.[0-9]+)\"" + + // extract log level, klog uses a single letter code for the level followed by the month and day i.e. 
I0119
      stage.regex {
        expression = "((?P<level>[A-Z])[0-9])"
      }

      // if the extracted level is I set INFO
      stage.replace {
        source = "level"
        expression = "(I)"
        replace = "INFO"
      }

      // if the extracted level is W set WARN
      stage.replace {
        source = "level"
        expression = "(W)"
        replace = "WARN"
      }

      // if the extracted level is E set ERROR
      stage.replace {
        source = "level"
        expression = "(E)"
        replace = "ERROR"
      }

      // if the extracted level is D set DEBUG
      stage.replace {
        source = "level"
        expression = "(D)"
        replace = "DEBUG"
      }

      // set the extracted level to be a label
      stage.labels {
        values = {
          level = "",
        }
      }
    }

    // if the level is still unknown, do one last attempt at detecting it based on common levels
    stage.match {
      selector = "{level=\"unknown\"}"

      // unescaped regex: (?i)(?:"(?:level|loglevel|levelname|lvl|levelText|SeverityText)":\s*"|\s*(?:level|loglevel|levelText|lvl)="?|\s+\[?)(?P<level>(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))("|\s+|-|\s*\])
      stage.regex {
        expression = "(?i)(?:\"(?:level|loglevel|levelname|lvl|levelText|SeverityText)\":\\s*\"|\\s*(?:level|loglevel|levelText|lvl)=\"?|\\s+\\[?)(?P<level>(DEBUG?|DBG|INFO?(RMATION)?|WA?RN(ING)?|ERR(OR)?|CRI?T(ICAL)?|FATAL|FTL|NOTICE|TRACE|TRC|PANIC|PNC|ALERT|EMERGENCY))(\"|\\s+|-|\\s*\\])"
      }

      // set the extracted level to be a label
      stage.labels {
        values = {
          level = "",
        }
      }
    }

    // Only keep the labels that are defined in the `keepLabels` list.
    stage.label_keep {
      values = ["instance","job","level","name","unit","service_name","source"]
    }

    forward_to = argument.logs_destinations.value
  }
}
node_logs "feature" {
  logs_destinations = [
    otelcol.receiver.loki.otel_endpoint.receiver,
  ]
}

// Feature: Pod Logs
declare "pod_logs" {
  argument "logs_destinations" {
    comment = "Must be a list of log destinations where collected logs should be forwarded to"
  }

  discovery.relabel "filtered_pods" {
    targets = discovery.kubernetes.pods.targets
    rule {
      source_labels = ["__meta_kubernetes_namespace"]
      action = "replace"
      target_label = "namespace"
    }
    rule {
      source_labels = ["__meta_kubernetes_pod_name"]
      action = "replace"
      target_label = "pod"
    }
    rule {
      source_labels = ["__meta_kubernetes_pod_container_name"]
      action = "replace"
      target_label = "container"
    }
    rule {
      source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"]
      separator = "/"
      action = "replace"
      replacement = "$1"
      target_label = "job"
    }

    // set the container runtime as a label
    rule {
      action = "replace"
      source_labels = ["__meta_kubernetes_pod_container_id"]
      regex = "^(\\S+):\\/\\/.+$"
      replacement = "$1"
      target_label = "tmp_container_runtime"
    }

    // set the job label from the k8s.grafana.com/logs.job annotation if it exists
    rule {
      source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_logs_job"]
      regex = "(.+)"
      target_label = "job"
    }

    // make all labels on the pod available to the pipeline as labels,
    // they are omitted before the write to loki via stage.label_keep unless explicitly set
    rule {
      action = "labelmap"
      regex = "__meta_kubernetes_pod_label_(.+)"
    }

    // make all annotations on the pod available to the pipeline as labels,
    // they are omitted before the write to loki via stage.label_keep unless explicitly set
    rule {
      action = "labelmap"
      regex = "__meta_kubernetes_pod_annotation_(.+)"
    }
  }
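  // Illustrative example (hypothetical pod, not part of this config): a container named "app"
  // in pod "orders-7d9f" in namespace "shop" gets namespace="shop", pod="orders-7d9f",
  // container="app", and job="shop/app"; a k8s.grafana.com/logs.job annotation on the pod,
  // if present, overrides that job value.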
discovery.kubernetes "pods" { + role = "pod" + selectors { + role = "pod" + field = "spec.nodeName=" + sys.env("HOSTNAME") + } + } + + discovery.relabel "filtered_pods_with_paths" { + targets = discovery.relabel.filtered_pods.output + + rule { + source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "/var/log/pods/*$1/*.log" + target_label = "__path__" + } + } + + local.file_match "pod_logs" { + path_targets = discovery.relabel.filtered_pods_with_paths.output + } + + loki.source.file "pod_logs" { + targets = local.file_match.pod_logs.targets + forward_to = [loki.process.pod_logs.receiver] + } + + loki.process "pod_logs" { + stage.match { + selector = "{tmp_container_runtime=~\"containerd|cri-o\"}" + // the cri processing stage extracts the following k/v pairs: log, stream, time, flags + stage.cri {} + + // Set the extract flags and stream values as labels + stage.labels { + values = { + flags = "", + stream = "", + } + } + } + + stage.match { + selector = "{tmp_container_runtime=\"docker\"}" + // the docker processing stage extracts the following k/v pairs: log, stream, time + stage.docker {} + + // Set the extract stream value as a label + stage.labels { + values = { + stream = "", + } + } + } + + // Drop the filename label, since it's not really useful in the context of Kubernetes, where we already have cluster, + // namespace, pod, and container labels. Drop any structured metadata. Also drop the temporary + // container runtime label as it is no longer needed. + stage.label_drop { + values = [ + "filename", + "tmp_container_runtime", + ] + } + + // Only keep the labels that are defined in the `keepLabels` list. + stage.label_keep { + values = ["app_kubernetes_io_name","container","instance","job","level","namespace","pod","service_name"] + } + + forward_to = argument.logs_destinations.value + } +} +pod_logs "feature" { + logs_destinations = [ + otelcol.receiver.loki.otel_endpoint.receiver, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-metrics.alloy new file mode 100644 index 000000000..e9efc2b78 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/oauth2/alloy-metrics.alloy @@ -0,0 +1,540 @@ +// Destination: otel-endpoint (otlp) +otelcol.receiver.prometheus "otel_endpoint" { + output { + metrics = [otelcol.processor.attributes.otel_endpoint.input] + } +} +otelcol.receiver.loki "otel_endpoint" { + output { + logs = [otelcol.processor.attributes.otel_endpoint.input] + } +} +otelcol.auth.oauth2 "otel_endpoint" { + client_id = nonsensitive(remote.kubernetes.secret.otel_endpoint.data["clientId"]) + client_secret_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + endpoint_params = { + client_assertion_type = ["urn:ietf:params:oauth:client-assertion-type:jwt-bearer"], + grant_type = ["client_credentials"], + } + token_url = "https://my.idp/application/o/token/" +} + +otelcol.processor.attributes "otel_endpoint" { + action { + key = "cluster" + action = "upsert" + value = "oauth2-auth-example" + } + action { + key = "k8s.cluster.name" + action = "upsert" + value = "oauth2-auth-example" + } + output { + metrics = [otelcol.processor.transform.otel_endpoint.input] + logs = [otelcol.processor.transform.otel_endpoint.input] + traces = [otelcol.processor.transform.otel_endpoint.input] + } +} + +otelcol.processor.transform "otel_endpoint" { + error_mode = "ignore" + + output { + metrics = 
[otelcol.processor.batch.otel_endpoint.input] + logs = [otelcol.processor.batch.otel_endpoint.input] + traces = [otelcol.processor.batch.otel_endpoint.input] + } +} + +otelcol.processor.batch "otel_endpoint" { + timeout = "2s" + send_batch_size = 8192 + send_batch_max_size = 0 + + output { + metrics = [otelcol.exporter.otlp.otel_endpoint.input] + logs = [otelcol.exporter.otlp.otel_endpoint.input] + traces = [otelcol.exporter.otlp.otel_endpoint.input] + } +} +otelcol.exporter.otlp "otel_endpoint" { + client { + endpoint = "grpc.my.otel.endpoint:443" + auth = otelcol.auth.oauth2.otel_endpoint.handler + tls { + insecure = false + insecure_skip_verify = false + } + } +} + +remote.kubernetes.secret "otel_endpoint" { + name = "otel-endpoint-k8smon-k8s-monitoring" + namespace = "default" +} + +// Feature: Annotation Autodiscovery +declare "annotation_autodiscovery" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.relabel "annotation_autodiscovery_pods" { + targets = discovery.kubernetes.pods.targets + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_scrape"] + regex = "true" + action = "keep" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_job"] + action = "replace" + target_label = "job" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_instance"] + action = "replace" + target_label = "instance" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_path"] + action = "replace" + target_label = "__metrics_path__" + } + + // Choose the pod port + // The discovery generates a target for each declared container port of the pod. + // If the metricsPortName annotation has value, keep only the target where the port name matches the one of the annotation. + rule { + source_labels = ["__meta_kubernetes_pod_container_port_name"] + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_portName"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_pod_container_port_name"] + action = "keepequal" + target_label = "__tmp_port" + } + + // If the metrics port number annotation has a value, override the target address to use it, regardless whether it is + // one of the declared ports on that Pod. 
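    // For example (values are illustrative): a k8s.grafana.com/metrics.portNumber annotation of
    // "8080" on a pod with IP "10.0.0.12" rewrites __address__ to "10.0.0.12:8080", while an
    // IPv6 pod IP such as "fe80::1" becomes "[fe80::1]:8080".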
+ rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_portNumber", "__meta_kubernetes_pod_ip"] + regex = "(\\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})" + replacement = "[$2]:$1" // IPv6 + target_label = "__address__" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_portNumber", "__meta_kubernetes_pod_ip"] + regex = "(\\d+);((([0-9]+?)(\\.|$)){4})" // IPv4, takes priority over IPv6 when both exists + replacement = "$2:$1" + target_label = "__address__" + } + + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_scheme"] + action = "replace" + target_label = "__scheme__" + } + + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_scrapeInterval"] + action = "replace" + target_label = "__scrape_interval__" + } + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.relabel "annotation_autodiscovery_services" { + targets = discovery.kubernetes.services.targets + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_scrape"] + regex = "true" + action = "keep" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_job"] + action = "replace" + target_label = "job" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_instance"] + action = "replace" + target_label = "instance" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_path"] + action = "replace" + target_label = "__metrics_path__" + } + + // Choose the service port + rule { + source_labels = ["__meta_kubernetes_service_port_name"] + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_portName"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_port_name"] + action = "keepequal" + target_label = "__tmp_port" + } + + rule { + source_labels = ["__meta_kubernetes_service_port_number"] + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_portNumber"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_port_number"] + action = "keepequal" + target_label = "__tmp_port" + } + + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_scheme"] + action = "replace" + target_label = "__scheme__" + } + + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_scrapeInterval"] + action = "replace" + target_label = "__scrape_interval__" + } + } + + discovery.relabel "annotation_autodiscovery_http" { + targets = concat(discovery.relabel.annotation_autodiscovery_pods.output, discovery.relabel.annotation_autodiscovery_services.output) + rule { + source_labels = ["__scheme__"] + regex = "https" + action = "drop" + } + } + + discovery.relabel "annotation_autodiscovery_https" { + targets = concat(discovery.relabel.annotation_autodiscovery_pods.output, discovery.relabel.annotation_autodiscovery_services.output) + rule { + source_labels = ["__scheme__"] + regex = "https" + action = "keep" + } + } + + prometheus.scrape "annotation_autodiscovery_http" { + targets = discovery.relabel.annotation_autodiscovery_http.output + scrape_interval = "60s" + honor_labels = true + bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + clustering { + 
enabled = true + } + + forward_to = argument.metrics_destinations.value + } + + prometheus.scrape "annotation_autodiscovery_https" { + targets = discovery.relabel.annotation_autodiscovery_https.output + scrape_interval = "60s" + honor_labels = true + bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + tls_config { + insecure_skip_verify = true + } + clustering { + enabled = true + } + + forward_to = argument.metrics_destinations.value + } +} +annotation_autodiscovery "feature" { + metrics_destinations = [ + otelcol.receiver.prometheus.otel_endpoint.receiver, + ] +} + +// Feature: Cluster Metrics +declare "cluster_metrics" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + remote.kubernetes.configmap "kubernetes" { + name = "k8smon-alloy-module-kubernetes" + namespace = "default" + } + + import.string "kubernetes" { + content = remote.kubernetes.configmap.kubernetes.data["core_metrics.alloy"] + } + + kubernetes.kubelet "scrape" { + clustering = true + keep_metrics = "up|container_cpu_usage_seconds_total|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_used|kubernetes_build_info|namespace_workload_pod|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" + scrape_interval = "60s" + max_cache_size = 100000 + forward_to = argument.metrics_destinations.value + } + + kubernetes.resources "scrape" { + clustering = true + job_label = "integrations/kubernetes/resources" + keep_metrics = "up|node_cpu_usage_seconds_total|node_memory_working_set_bytes" + scrape_interval = "60s" + max_cache_size = 100000 + forward_to = argument.metrics_destinations.value + } + + kubernetes.cadvisor "scrape" { + clustering = true + keep_metrics = "up|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" + scrape_interval = "60s" + max_cache_size = 100000 + forward_to = [prometheus.relabel.cadvisor.receiver] + } + + prometheus.relabel "cadvisor" { + max_cache_size = 100000 + // Drop empty container labels, addressing 
https://github.com/google/cadvisor/issues/2688 + rule { + source_labels = ["__name__","container"] + separator = "@" + regex = "(container_cpu_.*|container_fs_.*|container_memory_.*)@" + action = "drop" + } + // Drop empty image labels, addressing https://github.com/google/cadvisor/issues/2688 + rule { + source_labels = ["__name__","image"] + separator = "@" + regex = "(container_cpu_.*|container_fs_.*|container_memory_.*|container_network_.*)@" + action = "drop" + } + // Normalizing unimportant labels (not deleting to continue satisfying